author	Seema Khowala <seemaj@nvidia.com>	2017-05-22 13:39:52 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-05-30 14:04:05 -0400
commit	c3192b5acc03bf4e65aa1cbefb3a9ea88d87d9bd (patch)
tree	8b28808f3b90ea54981153d42e640133f0ce0e1b
parent	e8f4e52efe6bfd019c8be5de6ebf07f56463d562 (diff)
gpu: nvgpu: use mmu_fault_info struct for legacy gpu chips
Removed fifo_mmu_fault_info_gk20a struct to use new mmu_fault_info struct

JIRA GPUT19X-7
JIRA GPUT19X-12

Change-Id: I1987ff1b07e7dbdbee58d7e5f585faacf4846e54
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: http://git-master/r/1487240
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	146
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.h	15
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.h	4
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/hw/gk20a/hw_fifo_gk20a.h	6
-rw-r--r--	include/trace/events/gk20a.h	57
5 files changed, 129 insertions, 99 deletions
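The main behavioral change in the diff below is in get_exception_mmu_fault_info(): decoded fault fields now land in the common mmu_fault_info struct, the fault address is assembled into a single 64-bit value via hi32_lo32_to_u64(), and an out-of-range descriptor index now falls back to a "does not exist" string with a WARN_ON() instead of hitting BUG_ON(). The following stand-alone sketch shows only that lookup-with-fallback pattern; it is illustrative plain C, not nvgpu source, and the function name and table contents are assumptions.

/* Illustrative sketch (not nvgpu code): bounds-checked descriptor lookup
 * that warns and falls back instead of crashing, mirroring the pattern
 * used by the new get_exception_mmu_fault_info(). */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "WARNING: %s\n", #cond); } while (0)

/* hypothetical, abbreviated descriptor table */
static const char * const fault_type_descs[] = {
	"pde", "pde size", "pte", "va limit viol",
};
static const char * const does_not_exist[] = { "does not exist" };

static const char *describe_fault_type(unsigned int fault_type)
{
	if (fault_type >= ARRAY_SIZE(fault_type_descs)) {
		/* warn once about the bogus index, but keep going */
		WARN_ON(fault_type >= ARRAY_SIZE(fault_type_descs));
		return does_not_exist[0];
	}
	return fault_type_descs[fault_type];
}

int main(void)
{
	printf("%s\n", describe_fault_type(2));   /* prints "pte" */
	printf("%s\n", describe_fault_type(99));  /* warns, prints "does not exist" */
	return 0;
}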
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index b8ff84df..c7cd1d73 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -36,6 +36,7 @@
 #include "gk20a.h"
 #include "debug_gk20a.h"
 #include "ctxsw_trace_gk20a.h"
+#include "mm_gk20a.h"
 
 #include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
@@ -1160,51 +1161,78 @@ static const char * const gpc_client_descs[] = {
1160 "rgg utlb", 1161 "rgg utlb",
1161}; 1162};
1162 1163
1163/* reads info from hardware and fills in mmu fault info record */ 1164static const char * const does_not_exist[] = {
1164static inline void get_exception_mmu_fault_info( 1165 "does not exist"
1165 struct gk20a *g, u32 engine_id, 1166};
1166 struct fifo_mmu_fault_info_gk20a *f)
1167{
1168 u32 fault_info_v;
1169
1170 gk20a_dbg_fn("engine_id %d", engine_id);
1171
1172 memset(f, 0, sizeof(*f));
1173
1174 f->fault_info_v = fault_info_v = gk20a_readl(g,
1175 fifo_intr_mmu_fault_info_r(engine_id));
1176 f->fault_type_v =
1177 fifo_intr_mmu_fault_info_type_v(fault_info_v);
1178 f->engine_subid_v =
1179 fifo_intr_mmu_fault_info_engine_subid_v(fault_info_v);
1180 f->client_v = fifo_intr_mmu_fault_info_client_v(fault_info_v);
1181 1167
1182 BUG_ON(f->fault_type_v >= ARRAY_SIZE(fault_type_descs)); 1168/* reads info from hardware and fills in mmu fault info record */
1183 f->fault_type_desc = fault_type_descs[f->fault_type_v]; 1169static void get_exception_mmu_fault_info(
1170 struct gk20a *g, u32 mmu_fault_id,
1171 struct mmu_fault_info *mmfault)
1172{
1173 u32 fault_info;
1174 u32 addr_lo, addr_hi;
1175
1176 gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id);
1177
1178 memset(mmfault, 0, sizeof(*mmfault));
1179
1180 fault_info = gk20a_readl(g,
1181 fifo_intr_mmu_fault_info_r(mmu_fault_id));
1182 mmfault->fault_type =
1183 fifo_intr_mmu_fault_info_type_v(fault_info);
1184 mmfault->access_type =
1185 fifo_intr_mmu_fault_info_write_v(fault_info);
1186 mmfault->client_type =
1187 fifo_intr_mmu_fault_info_engine_subid_v(fault_info);
1188 mmfault->client_id =
1189 fifo_intr_mmu_fault_info_client_v(fault_info);
1190
1191 if (mmfault->fault_type >= ARRAY_SIZE(fault_type_descs)) {
1192 WARN_ON(mmfault->fault_type >= ARRAY_SIZE(fault_type_descs));
1193 mmfault->fault_type_desc = does_not_exist[0];
1194 } else {
1195 mmfault->fault_type_desc =
1196 fault_type_descs[mmfault->fault_type];
1197 }
1184 1198
1185 BUG_ON(f->engine_subid_v >= ARRAY_SIZE(engine_subid_descs)); 1199 if (mmfault->client_type >= ARRAY_SIZE(engine_subid_descs)) {
1186 f->engine_subid_desc = engine_subid_descs[f->engine_subid_v]; 1200 WARN_ON(mmfault->client_type >= ARRAY_SIZE(engine_subid_descs));
1201 mmfault->client_type_desc = does_not_exist[0];
1202 } else {
1203 mmfault->client_type_desc =
1204 engine_subid_descs[mmfault->client_type];
1205 }
1187 1206
1188 if (f->engine_subid_v == 1207 mmfault->client_id_desc = does_not_exist[0];
1208 if (mmfault->client_type ==
1189 fifo_intr_mmu_fault_info_engine_subid_hub_v()) { 1209 fifo_intr_mmu_fault_info_engine_subid_hub_v()) {
1190 1210
1191 BUG_ON(f->client_v >= ARRAY_SIZE(hub_client_descs)); 1211 if (mmfault->client_id >=
1192 f->client_desc = hub_client_descs[f->client_v]; 1212 ARRAY_SIZE(hub_client_descs))
1193 } else if (f->engine_subid_v == 1213 WARN_ON(mmfault->client_id >=
1194 fifo_intr_mmu_fault_info_engine_subid_gpc_v()) { 1214 ARRAY_SIZE(hub_client_descs));
1195 BUG_ON(f->client_v >= ARRAY_SIZE(gpc_client_descs)); 1215 else
1196 f->client_desc = gpc_client_descs[f->client_v]; 1216 mmfault->client_id_desc =
1197 } else { 1217 hub_client_descs[mmfault->client_id];
1198 BUG_ON(1); 1218 } else if (mmfault->client_type ==
1219 fifo_intr_mmu_fault_info_engine_subid_gpc_v()) {
1220 if (mmfault->client_id >= ARRAY_SIZE(gpc_client_descs))
1221 WARN_ON(mmfault->client_id >=
1222 ARRAY_SIZE(gpc_client_descs));
1223 else
1224 mmfault->client_id_desc =
1225 gpc_client_descs[mmfault->client_id];
1199 } 1226 }
1200 1227
1201 f->fault_hi_v = gk20a_readl(g, fifo_intr_mmu_fault_hi_r(engine_id)); 1228 addr_lo = gk20a_readl(g, fifo_intr_mmu_fault_lo_r(mmu_fault_id));
1202 f->fault_lo_v = gk20a_readl(g, fifo_intr_mmu_fault_lo_r(engine_id)); 1229 addr_hi = gk20a_readl(g, fifo_intr_mmu_fault_hi_r(mmu_fault_id));
1230 mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);
1203 /* note:ignoring aperture on gk20a... */ 1231 /* note:ignoring aperture on gk20a... */
1204 f->inst_ptr = fifo_intr_mmu_fault_inst_ptr_v( 1232 mmfault->inst_ptr = fifo_intr_mmu_fault_inst_ptr_v(
1205 gk20a_readl(g, fifo_intr_mmu_fault_inst_r(engine_id))); 1233 gk20a_readl(g, fifo_intr_mmu_fault_inst_r(mmu_fault_id)));
1206 /* note: inst_ptr is a 40b phys addr. */ 1234 /* note: inst_ptr is a 40b phys addr. */
1207 f->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v(); 1235 mmfault->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v();
1208} 1236}
1209 1237
1210void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id) 1238void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
@@ -1519,7 +1547,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 	 * engines. Convert engine_mmu_id to engine_id */
 	u32 engine_id = gk20a_mmu_id_to_engine_id(g,
 			engine_mmu_fault_id);
-	struct fifo_mmu_fault_info_gk20a f;
+	struct mmu_fault_info mmfault_info;
 	struct channel_gk20a *ch = NULL;
 	struct tsg_gk20a *tsg = NULL;
 	struct channel_gk20a *refch = NULL;
@@ -1533,26 +1561,29 @@ static bool gk20a_fifo_handle_mmu_fault(
 			|| ctx_status ==
 			fifo_engine_status_ctx_status_ctxsw_load_v());
 
-		get_exception_mmu_fault_info(g, engine_mmu_fault_id, &f);
-		trace_gk20a_mmu_fault(f.fault_hi_v,
-				f.fault_lo_v,
-				f.fault_info_v,
-				f.inst_ptr,
+		get_exception_mmu_fault_info(g, engine_mmu_fault_id,
+						&mmfault_info);
+		trace_gk20a_mmu_fault(mmfault_info.fault_addr,
+				mmfault_info.fault_type,
+				mmfault_info.access_type,
+				mmfault_info.inst_ptr,
 				engine_id,
-				f.engine_subid_desc,
-				f.client_desc,
-				f.fault_type_desc);
+				mmfault_info.client_type_desc,
+				mmfault_info.client_id_desc,
+				mmfault_info.fault_type_desc);
 		nvgpu_err(g, "%s mmu fault on engine %d, "
 			"engine subid %d (%s), client %d (%s), "
-			"addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
-			"inst_ptr 0x%llx",
+			"addr 0x%llx, type %d (%s), access_type 0x%08x,"
+			"inst_ptr 0x%llx\n",
 			fake_fault ? "fake" : "",
 			engine_id,
-			f.engine_subid_v, f.engine_subid_desc,
-			f.client_v, f.client_desc,
-			f.fault_hi_v, f.fault_lo_v,
-			f.fault_type_v, f.fault_type_desc,
-			f.fault_info_v, f.inst_ptr);
+			mmfault_info.client_type,
+			mmfault_info.client_type_desc,
+			mmfault_info.client_id, mmfault_info.client_id_desc,
+			mmfault_info.fault_addr,
+			mmfault_info.fault_type,
+			mmfault_info.fault_type_desc,
+			mmfault_info.access_type, mmfault_info.inst_ptr);
 
 		if (ctxsw) {
 			gk20a_fecs_dump_falcon_stats(g);
@@ -1589,7 +1620,8 @@ static bool gk20a_fifo_handle_mmu_fault(
 			}
 		} else {
 			/* read channel based on instruction pointer */
-			ch = gk20a_refch_from_inst_ptr(g, f.inst_ptr);
+			ch = gk20a_refch_from_inst_ptr(g,
+					mmfault_info.inst_ptr);
 			refch = ch;
 		}
 
@@ -1599,8 +1631,8 @@ static bool gk20a_fifo_handle_mmu_fault(
 		/* check if engine reset should be deferred */
 		if (engine_id != FIFO_INVAL_ENGINE_ID) {
 			bool defer = gk20a_fifo_should_defer_engine_reset(g,
-					engine_id, f.engine_subid_v,
+					engine_id, mmfault_info.client_type,
 					fake_fault);
 			if ((ch || tsg) && defer) {
 				g->fifo.deferred_fault_engines |= BIT(engine_id);
 
@@ -1656,10 +1688,10 @@ static bool gk20a_fifo_handle_mmu_fault(
1656 "mmu error in freed channel %d", 1688 "mmu error in freed channel %d",
1657 ch->hw_chid); 1689 ch->hw_chid);
1658 } 1690 }
1659 } else if (f.inst_ptr == 1691 } else if (mmfault_info.inst_ptr ==
1660 gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) { 1692 gk20a_mm_inst_block_addr(g, &g->mm.bar1.inst_block)) {
1661 nvgpu_err(g, "mmu fault from bar1"); 1693 nvgpu_err(g, "mmu fault from bar1");
1662 } else if (f.inst_ptr == 1694 } else if (mmfault_info.inst_ptr ==
1663 gk20a_mm_inst_block_addr(g, &g->mm.pmu.inst_block)) { 1695 gk20a_mm_inst_block_addr(g, &g->mm.pmu.inst_block)) {
1664 nvgpu_err(g, "mmu fault from pmu"); 1696 nvgpu_err(g, "mmu fault from pmu");
1665 } else 1697 } else
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
index 80f1853c..55075f3b 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h
@@ -104,19 +104,6 @@ struct fifo_engine_exception_info_gk20a {
 	bool faulted, idle, ctxsw_in_progress;
 };
 
-struct fifo_mmu_fault_info_gk20a {
-	u32 fault_info_v;
-	u32 fault_type_v;
-	u32 engine_subid_v;
-	u32 client_v;
-	u32 fault_hi_v;
-	u32 fault_lo_v;
-	u64 inst_ptr;
-	const char *fault_type_desc;
-	const char *engine_subid_desc;
-	const char *client_desc;
-};
-
 struct fifo_engine_info_gk20a {
 	u32 engine_id;
 	u32 runlist_id;
@@ -129,8 +116,6 @@ struct fifo_engine_info_gk20a {
 	u32 engine_enum;
 	struct fifo_pbdma_exception_info_gk20a pbdma_exception_info;
 	struct fifo_engine_exception_info_gk20a engine_exception_info;
-	struct fifo_mmu_fault_info_gk20a mmu_fault_info;
-
 };
 
 enum {
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 4adf346e..79b55371 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -173,8 +173,12 @@ struct mmu_fault_info {
 	u32 valid;
 	u32 faulted_pbdma;
 	u32 faulted_engine;
+	u32 faulted_subid;
 	u32 hw_chid;
 	struct channel_gk20a *refch;
+	const char *client_type_desc;
+	const char *fault_type_desc;
+	const char *client_id_desc;
 };
 
 struct mm_gk20a {
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gk20a/hw_fifo_gk20a.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gk20a/hw_fifo_gk20a.h
index 4d54c89f..cdecfec7 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/hw/gk20a/hw_fifo_gk20a.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gk20a/hw_fifo_gk20a.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -338,6 +338,10 @@ static inline u32 fifo_intr_mmu_fault_info_type_v(u32 r)
 {
 	return (r >> 0) & 0xf;
 }
+static inline u32 fifo_intr_mmu_fault_info_write_v(u32 r)
+{
+	return (r >> 7) & 0x1;
+}
 static inline u32 fifo_intr_mmu_fault_info_engine_subid_v(u32 r)
 {
 	return (r >> 6) & 0x1;
diff --git a/include/trace/events/gk20a.h b/include/trace/events/gk20a.h
index ae5255a4..3a8c8d23 100644
--- a/include/trace/events/gk20a.h
+++ b/include/trace/events/gk20a.h
@@ -463,39 +463,44 @@ TRACE_EVENT(gk20a_as_ioctl_get_va_regions,
 );
 
 TRACE_EVENT(gk20a_mmu_fault,
-	TP_PROTO(u32 fault_hi, u32 fault_lo,
-		u32 fault_info,
-		u64 instance,
+	TP_PROTO(u64 fault_addr,
+		u32 fault_type,
+		u32 access_type,
+		u64 inst_ptr,
 		u32 engine_id,
-		const char *engine,
-		const char *client,
-		const char *fault_type),
-	TP_ARGS(fault_hi, fault_lo, fault_info,
-		instance, engine_id, engine, client, fault_type),
+		const char *client_type_desc,
+		const char *client_id_desc,
+		const char *fault_type_desc),
+	TP_ARGS(fault_addr, fault_type, access_type,
+		inst_ptr, engine_id, client_type_desc,
+		client_id_desc, fault_type_desc),
 	TP_STRUCT__entry(
-		__field(u32, fault_hi)
-		__field(u32, fault_lo)
-		__field(u32, fault_info)
-		__field(u64, instance)
+		__field(u64, fault_addr)
+		__field(u32, fault_type)
+		__field(u32, access_type)
+		__field(u64, inst_ptr)
 		__field(u32, engine_id)
-		__field(const char *, engine)
-		__field(const char *, client)
-		__field(const char *, fault_type)
+		__field(const char *, client_type_desc)
+		__field(const char *, client_id_desc)
+		__field(const char *, fault_type_desc)
 	),
 	TP_fast_assign(
-		__entry->fault_hi = fault_hi;
-		__entry->fault_lo = fault_lo;
-		__entry->fault_info = fault_info;
-		__entry->instance = instance;
-		__entry->engine_id = engine_id;
-		__entry->engine = engine;
-		__entry->client = client;
+		__entry->fault_addr = fault_addr;
 		__entry->fault_type = fault_type;
+		__entry->access_type = access_type;
+		__entry->inst_ptr = inst_ptr;
+		__entry->engine_id = engine_id;
+		__entry->client_type_desc = client_type_desc;
+		__entry->client_id_desc = client_id_desc;
+		__entry->fault_type_desc = fault_type_desc;
 	),
-	TP_printk("fault=0x%x,%08x info=0x%x instance=0x%llx engine_id=%d engine=%s client=%s type=%s",
-		__entry->fault_hi, __entry->fault_lo,
-		__entry->fault_info, __entry->instance, __entry->engine_id,
-		__entry->engine, __entry->client, __entry->fault_type)
+	TP_printk("fault addr=0x%llx type=0x%x access_type=0x%x "
+		"instance=0x%llx engine_id=%d client_type=%s "
+		"client_id=%s fault type=%s",
+		__entry->fault_addr, __entry->fault_type,
+		__entry->access_type, __entry->inst_ptr,
+		__entry->engine_id, __entry->client_type_desc,
+		__entry->client_id_desc, __entry->fault_type_desc)
 );
 
 TRACE_EVENT(gk20a_ltc_cbc_ctrl_start,