diff options
author | Lakshmanan M <lm@nvidia.com> | 2016-05-23 02:50:14 -0400 |
---|---|---|
committer | Terje Bergstrom <tbergstrom@nvidia.com> | 2016-05-27 14:36:52 -0400 |
commit | f3cb140a71f179ce023bac48d5b92537893214a1 (patch) | |
tree | 597639f2003949c53b0c15f0bd6c8e56be23838f /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |
parent | 20c65d8f4a6cca705472bbdde52bd2fce3c6e274 (diff) |
gpu: nvgpu: Add device_info_data support
Added device_info_data parsing
support for maxwell GPU series.
JIRA DNVGPU-26
Change-Id: I06dbec6056d4c26501e607c2c3d67ef468d206f4
Signed-off-by: Lakshmanan M <lm@nvidia.com>
Reviewed-on: http://git-master/r/1151602
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 85 |
1 file changed, 58 insertions, 27 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index cf97b33a..16ca16d8 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -44,39 +44,49 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | |||
44 | * Link engine IDs to MMU IDs and vice versa. | 44 | * Link engine IDs to MMU IDs and vice versa. |
45 | */ | 45 | */ |
46 | 46 | ||
47 | static inline u32 gk20a_engine_id_to_mmu_id(u32 engine_id) | 47 | static inline u32 gk20a_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id) |
48 | { | 48 | { |
49 | switch (engine_id) { | 49 | u32 fault_id = ~0; |
50 | case ENGINE_GR_GK20A: | 50 | |
51 | return 0x00; | 51 | if (engine_id < ENGINE_INVAL_GK20A) { |
52 | case ENGINE_CE2_GK20A: | 52 | struct fifo_engine_info_gk20a *info = |
53 | return 0x1b; | 53 | &g->fifo.engine_info[engine_id]; |
54 | default: | 54 | |
55 | return ~0; | 55 | fault_id = info->fault_id; |
56 | } | 56 | } |
57 | return fault_id; | ||
57 | } | 58 | } |
58 | 59 | ||
59 | static inline u32 gk20a_mmu_id_to_engine_id(u32 engine_id) | 60 | static inline u32 gk20a_mmu_id_to_engine_id(struct gk20a *g, u32 fault_id) |
60 | { | 61 | { |
61 | switch (engine_id) { | 62 | u32 engine_id; |
62 | case 0x00: | 63 | u32 return_engine_id = ~0; |
63 | return ENGINE_GR_GK20A; | 64 | |
64 | case 0x1b: | 65 | for (engine_id = 0; engine_id < ENGINE_INVAL_GK20A; engine_id++) { |
65 | return ENGINE_CE2_GK20A; | 66 | struct fifo_engine_info_gk20a *info = |
66 | default: | 67 | &g->fifo.engine_info[engine_id]; |
67 | return ~0; | 68 | |
69 | if (info->fault_id == fault_id) { | ||
70 | return_engine_id = engine_id; | ||
71 | break; | ||
72 | } | ||
68 | } | 73 | } |
74 | return return_engine_id; | ||
69 | } | 75 | } |
70 | 76 | ||
71 | int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type) | 77 | int gk20a_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, |
78 | u32 *inst_id) | ||
72 | { | 79 | { |
73 | int ret = ENGINE_INVAL_GK20A; | 80 | int ret = ENGINE_INVAL_GK20A; |
74 | 81 | ||
75 | gk20a_dbg_info("engine type %d", engine_type); | 82 | gk20a_dbg_info("engine type %d", engine_type); |
76 | if (engine_type == top_device_info_type_enum_graphics_v()) | 83 | if (engine_type == top_device_info_type_enum_graphics_v()) |
77 | ret = ENGINE_GR_GK20A; | 84 | ret = ENGINE_GR_GK20A; |
78 | else if (engine_type == top_device_info_type_enum_copy2_v()) | 85 | else if (engine_type == top_device_info_type_enum_copy2_v()) { |
79 | ret = ENGINE_CE2_GK20A; | 86 | ret = ENGINE_CE2_GK20A; |
87 | if (inst_id) | ||
88 | *inst_id = 0x2; | ||
89 | } | ||
80 | else | 90 | else |
81 | gk20a_err(g->dev, "unknown engine %d", engine_type); | 91 | gk20a_err(g->dev, "unknown engine %d", engine_type); |
82 | 92 | ||
@@ -95,6 +105,9 @@ static int init_engine_info(struct fifo_gk20a *f) | |||
95 | u32 pbdma_id = ~0; | 105 | u32 pbdma_id = ~0; |
96 | u32 intr_id = ~0; | 106 | u32 intr_id = ~0; |
97 | u32 reset_id = ~0; | 107 | u32 reset_id = ~0; |
108 | u32 inst_id = 0; | ||
109 | u32 pri_base = 0; | ||
110 | u32 fault_id = 0; | ||
98 | 111 | ||
99 | gk20a_dbg_fn(""); | 112 | gk20a_dbg_fn(""); |
100 | 113 | ||
@@ -152,7 +165,15 @@ static int init_engine_info(struct fifo_gk20a *f) | |||
152 | u32 engine_type = | 165 | u32 engine_type = |
153 | top_device_info_type_enum_v(table_entry); | 166 | top_device_info_type_enum_v(table_entry); |
154 | engine_enum = | 167 | engine_enum = |
155 | g->ops.fifo.engine_enum_from_type(g, engine_type); | 168 | g->ops.fifo.engine_enum_from_type(g, |
169 | engine_type, &inst_id); | ||
170 | } else if (entry == top_device_info_entry_data_v()) { | ||
171 | /* gk20a don't support device_info_data | ||
172 | packet parsing */ | ||
173 | if (g->ops.fifo.device_info_data_parse) | ||
174 | g->ops.fifo.device_info_data_parse(g, | ||
175 | table_entry, &inst_id, &pri_base, | ||
176 | &fault_id); | ||
156 | } | 177 | } |
157 | 178 | ||
158 | if (!top_device_info_chain_v(table_entry)) { | 179 | if (!top_device_info_chain_v(table_entry)) { |
@@ -164,6 +185,13 @@ static int init_engine_info(struct fifo_gk20a *f) | |||
164 | info->reset_mask |= BIT(reset_id); | 185 | info->reset_mask |= BIT(reset_id); |
165 | info->runlist_id = runlist_id; | 186 | info->runlist_id = runlist_id; |
166 | info->pbdma_id = pbdma_id; | 187 | info->pbdma_id = pbdma_id; |
188 | info->inst_id = inst_id; | ||
189 | info->pri_base = pri_base; | ||
190 | |||
191 | if (!fault_id && | ||
192 | (engine_enum == ENGINE_CE2_GK20A)) | ||
193 | fault_id = 0x1b; | ||
194 | info->fault_id = fault_id; | ||
167 | 195 | ||
168 | engine_enum = ENGINE_INVAL_GK20A; | 196 | engine_enum = ENGINE_INVAL_GK20A; |
169 | } | 197 | } |
@@ -948,7 +976,7 @@ static bool gk20a_fifo_handle_mmu_fault( | |||
948 | { | 976 | { |
949 | bool fake_fault; | 977 | bool fake_fault; |
950 | unsigned long fault_id; | 978 | unsigned long fault_id; |
951 | unsigned long engine_mmu_id; | 979 | unsigned long engine_mmu_fault_id; |
952 | bool verbose = true; | 980 | bool verbose = true; |
953 | u32 grfifo_ctl; | 981 | u32 grfifo_ctl; |
954 | 982 | ||
@@ -988,10 +1016,11 @@ static bool gk20a_fifo_handle_mmu_fault( | |||
988 | 1016 | ||
989 | 1017 | ||
990 | /* go through all faulted engines */ | 1018 | /* go through all faulted engines */ |
991 | for_each_set_bit(engine_mmu_id, &fault_id, 32) { | 1019 | for_each_set_bit(engine_mmu_fault_id, &fault_id, 32) { |
992 | /* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to | 1020 | /* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to |
993 | * engines. Convert engine_mmu_id to engine_id */ | 1021 | * engines. Convert engine_mmu_id to engine_id */ |
994 | u32 engine_id = gk20a_mmu_id_to_engine_id(engine_mmu_id); | 1022 | u32 engine_id = gk20a_mmu_id_to_engine_id(g, |
1023 | engine_mmu_fault_id); | ||
995 | struct fifo_mmu_fault_info_gk20a f; | 1024 | struct fifo_mmu_fault_info_gk20a f; |
996 | struct channel_gk20a *ch = NULL; | 1025 | struct channel_gk20a *ch = NULL; |
997 | struct tsg_gk20a *tsg = NULL; | 1026 | struct tsg_gk20a *tsg = NULL; |
@@ -1007,7 +1036,7 @@ static bool gk20a_fifo_handle_mmu_fault( | |||
1007 | || ctx_status == | 1036 | || ctx_status == |
1008 | fifo_engine_status_ctx_status_ctxsw_load_v()); | 1037 | fifo_engine_status_ctx_status_ctxsw_load_v()); |
1009 | 1038 | ||
1010 | get_exception_mmu_fault_info(g, engine_mmu_id, &f); | 1039 | get_exception_mmu_fault_info(g, engine_mmu_fault_id, &f); |
1011 | trace_gk20a_mmu_fault(f.fault_hi_v, | 1040 | trace_gk20a_mmu_fault(f.fault_hi_v, |
1012 | f.fault_lo_v, | 1041 | f.fault_lo_v, |
1013 | f.fault_info_v, | 1042 | f.fault_info_v, |
@@ -1189,7 +1218,7 @@ static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g, | |||
1189 | 1218 | ||
1190 | gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), | 1219 | gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), |
1191 | fifo_trigger_mmu_fault_id_f( | 1220 | fifo_trigger_mmu_fault_id_f( |
1192 | gk20a_engine_id_to_mmu_id(engine_id)) | | 1221 | gk20a_engine_id_to_mmu_id(g, engine_id)) | |
1193 | fifo_trigger_mmu_fault_enable_f(1)); | 1222 | fifo_trigger_mmu_fault_enable_f(1)); |
1194 | } | 1223 | } |
1195 | 1224 | ||
@@ -1332,7 +1361,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | |||
1332 | engine_ids |= __engine_ids; | 1361 | engine_ids |= __engine_ids; |
1333 | for_each_set_bit(engine_id, &engine_ids, 32) { | 1362 | for_each_set_bit(engine_id, &engine_ids, 32) { |
1334 | mmu_fault_engines |= | 1363 | mmu_fault_engines |= |
1335 | BIT(gk20a_engine_id_to_mmu_id(engine_id)); | 1364 | BIT(gk20a_engine_id_to_mmu_id(g, engine_id)); |
1336 | } | 1365 | } |
1337 | } else { | 1366 | } else { |
1338 | /* store faulted engines in advance */ | 1367 | /* store faulted engines in advance */ |
@@ -1353,7 +1382,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids, | |||
1353 | if (ref_type == type && ref_id == id) { | 1382 | if (ref_type == type && ref_id == id) { |
1354 | engine_ids |= BIT(i); | 1383 | engine_ids |= BIT(i); |
1355 | mmu_fault_engines |= | 1384 | mmu_fault_engines |= |
1356 | BIT(gk20a_engine_id_to_mmu_id(i)); | 1385 | BIT(gk20a_engine_id_to_mmu_id(g, i)); |
1357 | } | 1386 | } |
1358 | } | 1387 | } |
1359 | } | 1388 | } |
@@ -2728,4 +2757,6 @@ void gk20a_init_fifo(struct gpu_ops *gops) | |||
2728 | gops->fifo.set_runlist_interleave = gk20a_fifo_set_runlist_interleave; | 2757 | gops->fifo.set_runlist_interleave = gk20a_fifo_set_runlist_interleave; |
2729 | gops->fifo.force_reset_ch = gk20a_fifo_force_reset_ch; | 2758 | gops->fifo.force_reset_ch = gk20a_fifo_force_reset_ch; |
2730 | gops->fifo.engine_enum_from_type = gk20a_fifo_engine_enum_from_type; | 2759 | gops->fifo.engine_enum_from_type = gk20a_fifo_engine_enum_from_type; |
2760 | /* gk20a don't support device_info_data packet parsing */ | ||
2761 | gops->fifo.device_info_data_parse = NULL; | ||
2731 | } | 2762 | } |