diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2016-11-08 16:36:17 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2016-11-11 11:21:06 -0500 |
commit | 268e772e807460cee64e354c025d43d8e24574b8 (patch) | |
tree | f2f4232c4530070f238c95759ad7f2eb5a9f1d14 /drivers/gpu/nvgpu/gk20a | |
parent | c30f649c4f85929580490180122a1e8c5edb6098 (diff) |
gpu: nvgpu: Deal with invalid MMU id
If gk20a_engine_id_to_mmu_id() fails, it returns ~0. Deal with this
by checking the result of each call to it.
Change-Id: I6fb9f7151f21a6c4694bfb2ea3c960d344fe629f
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1249965
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 23 |
1 files changed, 15 insertions, 8 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index b4850ee3..9887b68f 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -1557,15 +1557,18 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, | |||
1557 | 1557 | ||
1558 | /* trigger faults for all bad engines */ | 1558 | /* trigger faults for all bad engines */ |
1559 | for_each_set_bit(engine_id, &engine_ids, 32) { | 1559 | for_each_set_bit(engine_id, &engine_ids, 32) { |
1560 | u32 mmu_id; | ||
1561 | |||
1560 | if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) { | 1562 | if (!gk20a_fifo_is_valid_engine_id(g, engine_id)) { |
1561 | WARN_ON(true); | 1563 | WARN_ON(true); |
1562 | break; | 1564 | break; |
1563 | } | 1565 | } |
1564 | 1566 | ||
1565 | gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), | 1567 | mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); |
1566 | fifo_trigger_mmu_fault_id_f( | 1568 | if (mmu_id != ~0) |
1567 | gk20a_engine_id_to_mmu_id(g, engine_id)) | | 1569 | gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), |
1568 | fifo_trigger_mmu_fault_enable_f(1)); | 1570 | fifo_trigger_mmu_fault_id_f(mmu_id) | |
1571 | fifo_trigger_mmu_fault_enable_f(1)); | ||
1569 | } | 1572 | } |
1570 | 1573 | ||
1571 | /* Wait for MMU fault to trigger */ | 1574 | /* Wait for MMU fault to trigger */ |
@@ -1707,8 +1710,10 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | |||
1707 | /* atleast one engine will get passed during sched err*/ | 1710 | /* atleast one engine will get passed during sched err*/ |
1708 | engine_ids |= __engine_ids; | 1711 | engine_ids |= __engine_ids; |
1709 | for_each_set_bit(engine_id, &engine_ids, 32) { | 1712 | for_each_set_bit(engine_id, &engine_ids, 32) { |
1710 | mmu_fault_engines |= | 1713 | u32 mmu_id = gk20a_engine_id_to_mmu_id(g, engine_id); |
1711 | BIT(gk20a_engine_id_to_mmu_id(g, engine_id)); | 1714 | |
1715 | if (mmu_id != ~0) | ||
1716 | mmu_fault_engines |= BIT(mmu_id); | ||
1712 | } | 1717 | } |
1713 | } else { | 1718 | } else { |
1714 | /* store faulted engines in advance */ | 1719 | /* store faulted engines in advance */ |
@@ -1728,9 +1733,11 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | |||
1728 | 1733 | ||
1729 | gk20a_fifo_get_faulty_id_type(g, active_engine_id, &id, &type); | 1734 | gk20a_fifo_get_faulty_id_type(g, active_engine_id, &id, &type); |
1730 | if (ref_type == type && ref_id == id) { | 1735 | if (ref_type == type && ref_id == id) { |
1736 | u32 mmu_id = gk20a_engine_id_to_mmu_id(g, active_engine_id); | ||
1737 | |||
1731 | engine_ids |= BIT(active_engine_id); | 1738 | engine_ids |= BIT(active_engine_id); |
1732 | mmu_fault_engines |= | 1739 | if (mmu_id != ~0) |
1733 | BIT(gk20a_engine_id_to_mmu_id(g, active_engine_id)); | 1740 | mmu_fault_engines |= BIT(mmu_id); |
1734 | } | 1741 | } |
1735 | } | 1742 | } |
1736 | } | 1743 | } |