author     Seema Khowala <seemaj@nvidia.com>  2019-01-25 14:09:52 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2019-02-18 09:02:00 -0500
commit     465aff5f0d69f71a5fcb47112b59463d8b4f8b30 (patch)
tree       e64ef7af79868d2c24f9d194585d4f03fb41dad5 /drivers/gpu
parent     5e440e63d67058834b17e4cd28d3e5c9e9b8c6e2 (diff)
gpu: nvgpu: do not use raw spinlock for ch->timeout.lock
With PREEMPT_RT kernel, regular spinlocks are mapped onto sleeping
spinlocks (rt_mutex locks), and raw spinlocks retain their behaviour.

Schedule while atomic can occur in gk20a_channel_timeout_start, as it
acquires ch->timeout.lock raw spinlock, and then calls functions that
acquire ch->ch_timedout_lock regular spinlock.

Bug 200484795

Change-Id: Iacc63195d8ee6a2d571c998da1b4b5d396f49439
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2004100
(cherry picked from commit aacc33bb47aa8019c1a20b867d3722c241f7f93a in dev-kernel)
Reviewed-on: https://git-master.nvidia.com/r/2017923
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Debarshi Dutta <ddutta@nvidia.com>
Tested-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
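The failure mode described above is the classic PREEMPT_RT nesting pitfall: a sleeping lock taken inside a raw-spinlock critical section. The sketch below illustrates it with the generic Linux lock types rather than the nvgpu wrappers; the struct and function names are invented for illustration, and only the locking pattern mirrors the ch->timeout.lock / ch->ch_timedout_lock call chain named in the commit message.

#include <linux/spinlock.h>

struct example_channel {
	raw_spinlock_t timer_lock;	/* plays the role of ch->timeout.lock before this patch */
	spinlock_t state_lock;		/* plays the role of ch->ch_timedout_lock */
	bool timed_out;
};

static void example_update_state(struct example_channel *ch)
{
	/* On PREEMPT_RT, spinlock_t maps to an rt_mutex and may sleep here. */
	spin_lock(&ch->state_lock);
	ch->timed_out = false;
	spin_unlock(&ch->state_lock);
}

static void example_timeout_start(struct example_channel *ch)
{
	/* raw_spinlock_t keeps true spinlock semantics: preemption stays disabled. */
	raw_spin_lock(&ch->timer_lock);

	/*
	 * BUG on PREEMPT_RT: example_update_state() may sleep on state_lock
	 * while timer_lock keeps this context atomic ("scheduling while atomic").
	 */
	example_update_state(ch);

	raw_spin_unlock(&ch->timer_lock);
}

The patch resolves this by going the other way: ch->timeout.lock is converted from a raw to a regular spinlock, so under PREEMPT_RT both locks in the chain become sleeping locks and the nesting is legal.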
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/nvgpu/common/fifo/channel.c    | 28
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/channel.h  |  4
2 files changed, 16 insertions, 16 deletions
diff --git a/drivers/gpu/nvgpu/common/fifo/channel.c b/drivers/gpu/nvgpu/common/fifo/channel.c
index fc82748b..cbffb6de 100644
--- a/drivers/gpu/nvgpu/common/fifo/channel.c
+++ b/drivers/gpu/nvgpu/common/fifo/channel.c
@@ -1425,14 +1425,14 @@ static void gk20a_channel_timeout_start(struct channel_gk20a *ch)
 		return;
 	}
 
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 
 	if (ch->timeout.running) {
-		nvgpu_raw_spinlock_release(&ch->timeout.lock);
+		nvgpu_spinlock_release(&ch->timeout.lock);
 		return;
 	}
 	__gk20a_channel_timeout_start(ch);
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 }
 
 /**
@@ -1450,10 +1450,10 @@ static bool gk20a_channel_timeout_stop(struct channel_gk20a *ch)
 {
 	bool was_running;
 
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	was_running = ch->timeout.running;
 	ch->timeout.running = false;
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 	return was_running;
 }
 
@@ -1468,9 +1468,9 @@ static bool gk20a_channel_timeout_stop(struct channel_gk20a *ch)
  */
 static void gk20a_channel_timeout_continue(struct channel_gk20a *ch)
 {
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	ch->timeout.running = true;
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 }
 
 /**
@@ -1485,11 +1485,11 @@ static void gk20a_channel_timeout_continue(struct channel_gk20a *ch)
  */
 static void gk20a_channel_timeout_rewind(struct channel_gk20a *ch)
 {
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	if (ch->timeout.running) {
 		__gk20a_channel_timeout_start(ch);
 	}
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 }
 
 /**
@@ -1544,10 +1544,10 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 	}
 
 	/* Get status but keep timer running */
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	gp_get = ch->timeout.gp_get;
 	pb_get = ch->timeout.pb_get;
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 
 	new_gp_get = g->ops.fifo.userd_gp_get(ch->g, ch);
 	new_pb_get = g->ops.fifo.userd_pb_get(ch->g, ch);
@@ -1587,9 +1587,9 @@ static void gk20a_channel_timeout_check(struct channel_gk20a *ch)
 {
 	bool running;
 
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	running = ch->timeout.running;
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 
 	if (running) {
 		gk20a_channel_timeout_handler(ch);
@@ -2243,7 +2243,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	nvgpu_spinlock_init(&c->ref_actions_lock);
 #endif
 	nvgpu_spinlock_init(&c->joblist.dynamic.lock);
-	nvgpu_raw_spinlock_init(&c->timeout.lock);
+	nvgpu_spinlock_init(&c->timeout.lock);
 
 	nvgpu_init_list_node(&c->joblist.dynamic.jobs);
 	nvgpu_init_list_node(&c->dbg_s_list);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/channel.h b/drivers/gpu/nvgpu/include/nvgpu/channel.h
index 0a956c66..ba3d548e 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/channel.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/channel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -166,7 +166,7 @@ struct channel_gk20a_joblist {
 
 struct channel_gk20a_timeout {
 	/* lock protects the running timer state */
-	struct nvgpu_raw_spinlock lock;
+	struct nvgpu_spinlock lock;
 	struct nvgpu_timeout timer;
 	bool running;
 	u32 gp_get;