Diffstat (limited to 'arch/powerpc/platforms/powernv/eeh-ioda.c')
-rw-r--r-- | arch/powerpc/platforms/powernv/eeh-ioda.c | 1149
1 file changed, 0 insertions, 1149 deletions
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
deleted file mode 100644
index 2809c9895288..000000000000
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ /dev/null
@@ -1,1149 +0,0 @@
1 | /* | ||
2 | * This file implements the functions needed by EEH, which is built | ||
3 | * on IODA-compliant chips. Most of the EEH-related functions are | ||
4 | * built on top of the OPAL APIs. | ||
5 | * | ||
6 | * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/debugfs.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/irq.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/msi.h> | ||
20 | #include <linux/notifier.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/string.h> | ||
23 | |||
24 | #include <asm/eeh.h> | ||
25 | #include <asm/eeh_event.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/iommu.h> | ||
28 | #include <asm/msi_bitmap.h> | ||
29 | #include <asm/opal.h> | ||
30 | #include <asm/pci-bridge.h> | ||
31 | #include <asm/ppc-pci.h> | ||
32 | #include <asm/tce.h> | ||
33 | |||
34 | #include "powernv.h" | ||
35 | #include "pci.h" | ||
36 | |||
37 | static int ioda_eeh_nb_init = 0; | ||
38 | |||
39 | static int ioda_eeh_event(struct notifier_block *nb, | ||
40 | unsigned long events, void *change) | ||
41 | { | ||
42 | uint64_t changed_evts = (uint64_t)change; | ||
43 | |||
44 | /* | ||
45 | * We simply send special EEH event if EEH has | ||
46 | * been enabled, or clear pending events in | ||
47 | * case that we enable EEH soon | ||
48 | */ | ||
49 | if (!(changed_evts & OPAL_EVENT_PCI_ERROR) || | ||
50 | !(events & OPAL_EVENT_PCI_ERROR)) | ||
51 | return 0; | ||
52 | |||
53 | if (eeh_enabled()) | ||
54 | eeh_send_failure_event(NULL); | ||
55 | else | ||
56 | opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static struct notifier_block ioda_eeh_nb = { | ||
62 | .notifier_call = ioda_eeh_event, | ||
63 | .next = NULL, | ||
64 | .priority = 0 | ||
65 | }; | ||
66 | |||
67 | #ifdef CONFIG_DEBUG_FS | ||
68 | static ssize_t ioda_eeh_ei_write(struct file *filp, | ||
69 | const char __user *user_buf, | ||
70 | size_t count, loff_t *ppos) | ||
71 | { | ||
72 | struct pci_controller *hose = filp->private_data; | ||
73 | struct pnv_phb *phb = hose->private_data; | ||
74 | struct eeh_dev *edev; | ||
75 | struct eeh_pe *pe; | ||
76 | int pe_no, type, func; | ||
77 | unsigned long addr, mask; | ||
78 | char buf[50]; | ||
79 | int ret; | ||
80 | |||
81 | if (!phb->eeh_ops || !phb->eeh_ops->err_inject) | ||
82 | return -ENXIO; | ||
83 | |||
84 | ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); | ||
85 | if (!ret) | ||
86 | return -EFAULT; | ||
87 | |||
88 | /* Retrieve parameters */ | ||
89 | ret = sscanf(buf, "%x:%x:%x:%lx:%lx", | ||
90 | &pe_no, &type, &func, &addr, &mask); | ||
91 | if (ret != 5) | ||
92 | return -EINVAL; | ||
93 | |||
94 | /* Retrieve PE */ | ||
95 | edev = kzalloc(sizeof(*edev), GFP_KERNEL); | ||
96 | if (!edev) | ||
97 | return -ENOMEM; | ||
98 | edev->phb = hose; | ||
99 | edev->pe_config_addr = pe_no; | ||
100 | pe = eeh_pe_get(edev); | ||
101 | kfree(edev); | ||
102 | if (!pe) | ||
103 | return -ENODEV; | ||
104 | |||
105 | /* Do error injection */ | ||
106 | ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask); | ||
107 | return ret < 0 ? ret : count; | ||
108 | } | ||
109 | |||
110 | static const struct file_operations ioda_eeh_ei_fops = { | ||
111 | .open = simple_open, | ||
112 | .llseek = no_llseek, | ||
113 | .write = ioda_eeh_ei_write, | ||
114 | }; | ||
115 | |||
116 | static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val) | ||
117 | { | ||
118 | struct pci_controller *hose = data; | ||
119 | struct pnv_phb *phb = hose->private_data; | ||
120 | |||
121 | out_be64(phb->regs + offset, val); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val) | ||
126 | { | ||
127 | struct pci_controller *hose = data; | ||
128 | struct pnv_phb *phb = hose->private_data; | ||
129 | |||
130 | *val = in_be64(phb->regs + offset); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int ioda_eeh_outb_dbgfs_set(void *data, u64 val) | ||
135 | { | ||
136 | return ioda_eeh_dbgfs_set(data, 0xD10, val); | ||
137 | } | ||
138 | |||
139 | static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val) | ||
140 | { | ||
141 | return ioda_eeh_dbgfs_get(data, 0xD10, val); | ||
142 | } | ||
143 | |||
144 | static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val) | ||
145 | { | ||
146 | return ioda_eeh_dbgfs_set(data, 0xD90, val); | ||
147 | } | ||
148 | |||
149 | static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val) | ||
150 | { | ||
151 | return ioda_eeh_dbgfs_get(data, 0xD90, val); | ||
152 | } | ||
153 | |||
154 | static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val) | ||
155 | { | ||
156 | return ioda_eeh_dbgfs_set(data, 0xE10, val); | ||
157 | } | ||
158 | |||
159 | static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val) | ||
160 | { | ||
161 | return ioda_eeh_dbgfs_get(data, 0xE10, val); | ||
162 | } | ||
163 | |||
164 | DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get, | ||
165 | ioda_eeh_outb_dbgfs_set, "0x%llx\n"); | ||
166 | DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get, | ||
167 | ioda_eeh_inbA_dbgfs_set, "0x%llx\n"); | ||
168 | DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get, | ||
169 | ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); | ||
170 | #endif /* CONFIG_DEBUG_FS */ | ||
171 | |||
172 | |||
173 | /** | ||
174 | * ioda_eeh_post_init - Chip dependent post initialization | ||
175 | * @hose: PCI controller | ||
176 | * | ||
177 | * The function will be called after eeh PEs and devices | ||
178 | * have been built. That means the EEH is ready to supply | ||
179 | * service with I/O cache. | ||
180 | */ | ||
181 | static int ioda_eeh_post_init(struct pci_controller *hose) | ||
182 | { | ||
183 | struct pnv_phb *phb = hose->private_data; | ||
184 | int ret; | ||
185 | |||
186 | /* Register OPAL event notifier */ | ||
187 | if (!ioda_eeh_nb_init) { | ||
188 | ret = opal_notifier_register(&ioda_eeh_nb); | ||
189 | if (ret) { | ||
190 | pr_err("%s: Can't register OPAL event notifier (%d)\n", | ||
191 | __func__, ret); | ||
192 | return ret; | ||
193 | } | ||
194 | |||
195 | ioda_eeh_nb_init = 1; | ||
196 | } | ||
197 | |||
198 | #ifdef CONFIG_DEBUG_FS | ||
199 | if (!phb->has_dbgfs && phb->dbgfs) { | ||
200 | phb->has_dbgfs = 1; | ||
201 | |||
202 | debugfs_create_file("err_injct", 0200, | ||
203 | phb->dbgfs, hose, | ||
204 | &ioda_eeh_ei_fops); | ||
205 | |||
206 | debugfs_create_file("err_injct_outbound", 0600, | ||
207 | phb->dbgfs, hose, | ||
208 | &ioda_eeh_outb_dbgfs_ops); | ||
209 | debugfs_create_file("err_injct_inboundA", 0600, | ||
210 | phb->dbgfs, hose, | ||
211 | &ioda_eeh_inbA_dbgfs_ops); | ||
212 | debugfs_create_file("err_injct_inboundB", 0600, | ||
213 | phb->dbgfs, hose, | ||
214 | &ioda_eeh_inbB_dbgfs_ops); | ||
215 | } | ||
216 | #endif | ||
217 | |||
218 | /* If EEH is enabled, we're going to rely on that. | ||
219 | * Otherwise, we restore to conventional mechanism | ||
220 | * to clear frozen PE during PCI config access. | ||
221 | */ | ||
222 | if (eeh_enabled()) | ||
223 | phb->flags |= PNV_PHB_FLAG_EEH; | ||
224 | else | ||
225 | phb->flags &= ~PNV_PHB_FLAG_EEH; | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * ioda_eeh_set_option - Set EEH operation or I/O setting | ||
232 | * @pe: EEH PE | ||
233 | * @option: options | ||
234 | * | ||
235 | * Enable or disable EEH option for the indicated PE. The | ||
236 | * function also can be used to enable I/O or DMA for the | ||
237 | * PE. | ||
238 | */ | ||
239 | static int ioda_eeh_set_option(struct eeh_pe *pe, int option) | ||
240 | { | ||
241 | struct pci_controller *hose = pe->phb; | ||
242 | struct pnv_phb *phb = hose->private_data; | ||
243 | bool freeze_pe = false; | ||
244 | int enable, ret = 0; | ||
245 | s64 rc; | ||
246 | |||
247 | /* Check on PE number */ | ||
248 | if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) { | ||
249 | pr_err("%s: PE address %x out of range [0, %x] " | ||
250 | "on PHB#%x\n", | ||
251 | __func__, pe->addr, phb->ioda.total_pe, | ||
252 | hose->global_number); | ||
253 | return -EINVAL; | ||
254 | } | ||
255 | |||
256 | switch (option) { | ||
257 | case EEH_OPT_DISABLE: | ||
258 | return -EPERM; | ||
259 | case EEH_OPT_ENABLE: | ||
260 | return 0; | ||
261 | case EEH_OPT_THAW_MMIO: | ||
262 | enable = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO; | ||
263 | break; | ||
264 | case EEH_OPT_THAW_DMA: | ||
265 | enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA; | ||
266 | break; | ||
267 | case EEH_OPT_FREEZE_PE: | ||
268 | freeze_pe = true; | ||
269 | enable = OPAL_EEH_ACTION_SET_FREEZE_ALL; | ||
270 | break; | ||
271 | default: | ||
272 | pr_warn("%s: Invalid option %d\n", | ||
273 | __func__, option); | ||
274 | return -EINVAL; | ||
275 | } | ||
276 | |||
277 | /* If PHB supports compound PE, to handle it */ | ||
278 | if (freeze_pe) { | ||
279 | if (phb->freeze_pe) { | ||
280 | phb->freeze_pe(phb, pe->addr); | ||
281 | } else { | ||
282 | rc = opal_pci_eeh_freeze_set(phb->opal_id, | ||
283 | pe->addr, | ||
284 | enable); | ||
285 | if (rc != OPAL_SUCCESS) { | ||
286 | pr_warn("%s: Failure %lld freezing " | ||
287 | "PHB#%x-PE#%x\n", | ||
288 | __func__, rc, | ||
289 | phb->hose->global_number, pe->addr); | ||
290 | ret = -EIO; | ||
291 | } | ||
292 | } | ||
293 | } else { | ||
294 | if (phb->unfreeze_pe) { | ||
295 | ret = phb->unfreeze_pe(phb, pe->addr, enable); | ||
296 | } else { | ||
297 | rc = opal_pci_eeh_freeze_clear(phb->opal_id, | ||
298 | pe->addr, | ||
299 | enable); | ||
300 | if (rc != OPAL_SUCCESS) { | ||
301 | pr_warn("%s: Failure %lld enable %d " | ||
302 | "for PHB#%x-PE#%x\n", | ||
303 | __func__, rc, option, | ||
304 | phb->hose->global_number, pe->addr); | ||
305 | ret = -EIO; | ||
306 | } | ||
307 | } | ||
308 | } | ||
309 | |||
310 | return ret; | ||
311 | } | ||
312 | |||
313 | static void ioda_eeh_phb_diag(struct eeh_pe *pe) | ||
314 | { | ||
315 | struct pnv_phb *phb = pe->phb->private_data; | ||
316 | long rc; | ||
317 | |||
318 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data, | ||
319 | PNV_PCI_DIAG_BUF_SIZE); | ||
320 | if (rc != OPAL_SUCCESS) | ||
321 | pr_warn("%s: Failed to get diag-data for PHB#%x (%ld)\n", | ||
322 | __func__, pe->phb->global_number, rc); | ||
323 | } | ||
324 | |||
325 | static int ioda_eeh_get_phb_state(struct eeh_pe *pe) | ||
326 | { | ||
327 | struct pnv_phb *phb = pe->phb->private_data; | ||
328 | u8 fstate; | ||
329 | __be16 pcierr; | ||
330 | s64 rc; | ||
331 | int result = 0; | ||
332 | |||
333 | rc = opal_pci_eeh_freeze_status(phb->opal_id, | ||
334 | pe->addr, | ||
335 | &fstate, | ||
336 | &pcierr, | ||
337 | NULL); | ||
338 | if (rc != OPAL_SUCCESS) { | ||
339 | pr_warn("%s: Failure %lld getting PHB#%x state\n", | ||
340 | __func__, rc, phb->hose->global_number); | ||
341 | return EEH_STATE_NOT_SUPPORT; | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * Check PHB state. If the PHB is frozen for the | ||
346 | * first time, to dump the PHB diag-data. | ||
347 | */ | ||
348 | if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) { | ||
349 | result = (EEH_STATE_MMIO_ACTIVE | | ||
350 | EEH_STATE_DMA_ACTIVE | | ||
351 | EEH_STATE_MMIO_ENABLED | | ||
352 | EEH_STATE_DMA_ENABLED); | ||
353 | } else if (!(pe->state & EEH_PE_ISOLATED)) { | ||
354 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
355 | ioda_eeh_phb_diag(pe); | ||
356 | |||
357 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
358 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | ||
359 | } | ||
360 | |||
361 | return result; | ||
362 | } | ||
363 | |||
364 | static int ioda_eeh_get_pe_state(struct eeh_pe *pe) | ||
365 | { | ||
366 | struct pnv_phb *phb = pe->phb->private_data; | ||
367 | u8 fstate; | ||
368 | __be16 pcierr; | ||
369 | s64 rc; | ||
370 | int result; | ||
371 | |||
372 | /* | ||
373 | * We don't clobber hardware frozen state until PE | ||
374 | * reset is completed. In order to keep EEH core | ||
375 | * moving forward, we have to return operational | ||
376 | * state during PE reset. | ||
377 | */ | ||
378 | if (pe->state & EEH_PE_RESET) { | ||
379 | result = (EEH_STATE_MMIO_ACTIVE | | ||
380 | EEH_STATE_DMA_ACTIVE | | ||
381 | EEH_STATE_MMIO_ENABLED | | ||
382 | EEH_STATE_DMA_ENABLED); | ||
383 | return result; | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * Fetch PE state from hardware. If the PHB | ||
388 | * supports compound PE, let it handle that. | ||
389 | */ | ||
390 | if (phb->get_pe_state) { | ||
391 | fstate = phb->get_pe_state(phb, pe->addr); | ||
392 | } else { | ||
393 | rc = opal_pci_eeh_freeze_status(phb->opal_id, | ||
394 | pe->addr, | ||
395 | &fstate, | ||
396 | &pcierr, | ||
397 | NULL); | ||
398 | if (rc != OPAL_SUCCESS) { | ||
399 | pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n", | ||
400 | __func__, rc, phb->hose->global_number, pe->addr); | ||
401 | return EEH_STATE_NOT_SUPPORT; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | /* Figure out state */ | ||
406 | switch (fstate) { | ||
407 | case OPAL_EEH_STOPPED_NOT_FROZEN: | ||
408 | result = (EEH_STATE_MMIO_ACTIVE | | ||
409 | EEH_STATE_DMA_ACTIVE | | ||
410 | EEH_STATE_MMIO_ENABLED | | ||
411 | EEH_STATE_DMA_ENABLED); | ||
412 | break; | ||
413 | case OPAL_EEH_STOPPED_MMIO_FREEZE: | ||
414 | result = (EEH_STATE_DMA_ACTIVE | | ||
415 | EEH_STATE_DMA_ENABLED); | ||
416 | break; | ||
417 | case OPAL_EEH_STOPPED_DMA_FREEZE: | ||
418 | result = (EEH_STATE_MMIO_ACTIVE | | ||
419 | EEH_STATE_MMIO_ENABLED); | ||
420 | break; | ||
421 | case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE: | ||
422 | result = 0; | ||
423 | break; | ||
424 | case OPAL_EEH_STOPPED_RESET: | ||
425 | result = EEH_STATE_RESET_ACTIVE; | ||
426 | break; | ||
427 | case OPAL_EEH_STOPPED_TEMP_UNAVAIL: | ||
428 | result = EEH_STATE_UNAVAILABLE; | ||
429 | break; | ||
430 | case OPAL_EEH_STOPPED_PERM_UNAVAIL: | ||
431 | result = EEH_STATE_NOT_SUPPORT; | ||
432 | break; | ||
433 | default: | ||
434 | result = EEH_STATE_NOT_SUPPORT; | ||
435 | pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n", | ||
436 | __func__, phb->hose->global_number, | ||
437 | pe->addr, fstate); | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * If PHB supports compound PE, to freeze all | ||
442 | * slave PEs for consistency. | ||
443 | * | ||
444 | * If the PE is switching to frozen state for the | ||
445 | * first time, to dump the PHB diag-data. | ||
446 | */ | ||
447 | if (!(result & EEH_STATE_NOT_SUPPORT) && | ||
448 | !(result & EEH_STATE_UNAVAILABLE) && | ||
449 | !(result & EEH_STATE_MMIO_ACTIVE) && | ||
450 | !(result & EEH_STATE_DMA_ACTIVE) && | ||
451 | !(pe->state & EEH_PE_ISOLATED)) { | ||
452 | if (phb->freeze_pe) | ||
453 | phb->freeze_pe(phb, pe->addr); | ||
454 | |||
455 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
456 | ioda_eeh_phb_diag(pe); | ||
457 | |||
458 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
459 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | ||
460 | } | ||
461 | |||
462 | return result; | ||
463 | } | ||
464 | |||
465 | /** | ||
466 | * ioda_eeh_get_state - Retrieve the state of PE | ||
467 | * @pe: EEH PE | ||
468 | * | ||
469 | * The PE's state should be retrieved from the PEEV, PEST | ||
470 | * IODA tables. Since the OPAL has exported the function | ||
471 | * to do it, it'd better to use that. | ||
472 | */ | ||
473 | static int ioda_eeh_get_state(struct eeh_pe *pe) | ||
474 | { | ||
475 | struct pnv_phb *phb = pe->phb->private_data; | ||
476 | |||
477 | /* Sanity check on PE number. PHB PE should have 0 */ | ||
478 | if (pe->addr < 0 || | ||
479 | pe->addr >= phb->ioda.total_pe) { | ||
480 | pr_warn("%s: PHB#%x-PE#%x out of range [0, %x]\n", | ||
481 | __func__, phb->hose->global_number, | ||
482 | pe->addr, phb->ioda.total_pe); | ||
483 | return EEH_STATE_NOT_SUPPORT; | ||
484 | } | ||
485 | |||
486 | if (pe->type & EEH_PE_PHB) | ||
487 | return ioda_eeh_get_phb_state(pe); | ||
488 | |||
489 | return ioda_eeh_get_pe_state(pe); | ||
490 | } | ||
491 | |||
492 | static s64 ioda_eeh_phb_poll(struct pnv_phb *phb) | ||
493 | { | ||
494 | s64 rc = OPAL_HARDWARE; | ||
495 | |||
496 | while (1) { | ||
497 | rc = opal_pci_poll(phb->opal_id); | ||
498 | if (rc <= 0) | ||
499 | break; | ||
500 | |||
501 | if (system_state < SYSTEM_RUNNING) | ||
502 | udelay(1000 * rc); | ||
503 | else | ||
504 | msleep(rc); | ||
505 | } | ||
506 | |||
507 | return rc; | ||
508 | } | ||
509 | |||
510 | int ioda_eeh_phb_reset(struct pci_controller *hose, int option) | ||
511 | { | ||
512 | struct pnv_phb *phb = hose->private_data; | ||
513 | s64 rc = OPAL_HARDWARE; | ||
514 | |||
515 | pr_debug("%s: Reset PHB#%x, option=%d\n", | ||
516 | __func__, hose->global_number, option); | ||
517 | |||
518 | /* Issue PHB complete reset request */ | ||
519 | if (option == EEH_RESET_FUNDAMENTAL || | ||
520 | option == EEH_RESET_HOT) | ||
521 | rc = opal_pci_reset(phb->opal_id, | ||
522 | OPAL_RESET_PHB_COMPLETE, | ||
523 | OPAL_ASSERT_RESET); | ||
524 | else if (option == EEH_RESET_DEACTIVATE) | ||
525 | rc = opal_pci_reset(phb->opal_id, | ||
526 | OPAL_RESET_PHB_COMPLETE, | ||
527 | OPAL_DEASSERT_RESET); | ||
528 | if (rc < 0) | ||
529 | goto out; | ||
530 | |||
531 | /* | ||
532 | * Poll state of the PHB until the request is done | ||
533 | * successfully. The PHB reset is usually PHB complete | ||
534 | * reset followed by hot reset on root bus. So we also | ||
535 | * need the PCI bus settlement delay. | ||
536 | */ | ||
537 | rc = ioda_eeh_phb_poll(phb); | ||
538 | if (option == EEH_RESET_DEACTIVATE) { | ||
539 | if (system_state < SYSTEM_RUNNING) | ||
540 | udelay(1000 * EEH_PE_RST_SETTLE_TIME); | ||
541 | else | ||
542 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
543 | } | ||
544 | out: | ||
545 | if (rc != OPAL_SUCCESS) | ||
546 | return -EIO; | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static int ioda_eeh_root_reset(struct pci_controller *hose, int option) | ||
552 | { | ||
553 | struct pnv_phb *phb = hose->private_data; | ||
554 | s64 rc = OPAL_SUCCESS; | ||
555 | |||
556 | pr_debug("%s: Reset PHB#%x, option=%d\n", | ||
557 | __func__, hose->global_number, option); | ||
558 | |||
559 | /* | ||
560 | * During the reset deassert time, we needn't care | ||
561 | * the reset scope because the firmware does nothing | ||
562 | * for fundamental or hot reset during deassert phase. | ||
563 | */ | ||
564 | if (option == EEH_RESET_FUNDAMENTAL) | ||
565 | rc = opal_pci_reset(phb->opal_id, | ||
566 | OPAL_RESET_PCI_FUNDAMENTAL, | ||
567 | OPAL_ASSERT_RESET); | ||
568 | else if (option == EEH_RESET_HOT) | ||
569 | rc = opal_pci_reset(phb->opal_id, | ||
570 | OPAL_RESET_PCI_HOT, | ||
571 | OPAL_ASSERT_RESET); | ||
572 | else if (option == EEH_RESET_DEACTIVATE) | ||
573 | rc = opal_pci_reset(phb->opal_id, | ||
574 | OPAL_RESET_PCI_HOT, | ||
575 | OPAL_DEASSERT_RESET); | ||
576 | if (rc < 0) | ||
577 | goto out; | ||
578 | |||
579 | /* Poll state of the PHB until the request is done */ | ||
580 | rc = ioda_eeh_phb_poll(phb); | ||
581 | if (option == EEH_RESET_DEACTIVATE) | ||
582 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
583 | out: | ||
584 | if (rc != OPAL_SUCCESS) | ||
585 | return -EIO; | ||
586 | |||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option) | ||
591 | |||
592 | { | ||
593 | struct device_node *dn = pci_device_to_OF_node(dev); | ||
594 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | ||
595 | int aer = edev ? edev->aer_cap : 0; | ||
596 | u32 ctrl; | ||
597 | |||
598 | pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n", | ||
599 | __func__, pci_domain_nr(dev->bus), | ||
600 | dev->bus->number, option); | ||
601 | |||
602 | switch (option) { | ||
603 | case EEH_RESET_FUNDAMENTAL: | ||
604 | case EEH_RESET_HOT: | ||
605 | /* Don't report linkDown event */ | ||
606 | if (aer) { | ||
607 | eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK, | ||
608 | 4, &ctrl); | ||
609 | ctrl |= PCI_ERR_UNC_SURPDN; | ||
610 | eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK, | ||
611 | 4, ctrl); | ||
612 | } | ||
613 | |||
614 | eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl); | ||
615 | ctrl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
616 | eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl); | ||
617 | msleep(EEH_PE_RST_HOLD_TIME); | ||
618 | |||
619 | break; | ||
620 | case EEH_RESET_DEACTIVATE: | ||
621 | eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl); | ||
622 | ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
623 | eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl); | ||
624 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
625 | |||
626 | /* Continue reporting linkDown event */ | ||
627 | if (aer) { | ||
628 | eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK, | ||
629 | 4, &ctrl); | ||
630 | ctrl &= ~PCI_ERR_UNC_SURPDN; | ||
631 | eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK, | ||
632 | 4, ctrl); | ||
633 | } | ||
634 | |||
635 | break; | ||
636 | } | ||
637 | |||
638 | return 0; | ||
639 | } | ||
640 | |||
641 | void pnv_pci_reset_secondary_bus(struct pci_dev *dev) | ||
642 | { | ||
643 | struct pci_controller *hose; | ||
644 | |||
645 | if (pci_is_root_bus(dev->bus)) { | ||
646 | hose = pci_bus_to_host(dev->bus); | ||
647 | ioda_eeh_root_reset(hose, EEH_RESET_HOT); | ||
648 | ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE); | ||
649 | } else { | ||
650 | ioda_eeh_bridge_reset(dev, EEH_RESET_HOT); | ||
651 | ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE); | ||
652 | } | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * ioda_eeh_reset - Reset the indicated PE | ||
657 | * @pe: EEH PE | ||
658 | * @option: reset option | ||
659 | * | ||
660 | * Do reset on the indicated PE. For PCI bus sensitive PE, | ||
661 | * we need to reset the parent p2p bridge. The PHB has to | ||
662 | * be reinitialized if the p2p bridge is root bridge. For | ||
663 | * PCI device sensitive PE, we will try to reset the device | ||
664 | * through FLR. For now, we don't have OPAL APIs to do HARD | ||
665 | * reset yet, so all reset would be SOFT (HOT) reset. | ||
666 | */ | ||
667 | static int ioda_eeh_reset(struct eeh_pe *pe, int option) | ||
668 | { | ||
669 | struct pci_controller *hose = pe->phb; | ||
670 | struct pci_bus *bus; | ||
671 | int ret; | ||
672 | |||
673 | /* | ||
674 | * For PHB reset, we always have complete reset. For those PEs whose | ||
675 | * primary bus derived from root complex (root bus) or root port | ||
676 | * (usually bus#1), we apply hot or fundamental reset on the root port. | ||
677 | * For other PEs, we always have hot reset on the PE primary bus. | ||
678 | * | ||
679 | * Here, we have different design to pHyp, which always clear the | ||
680 | * frozen state during PE reset. However, the good idea here from | ||
681 | * benh is to keep frozen state before we get PE reset done completely | ||
682 | * (until BAR restore). With the frozen state, HW drops illegal IO | ||
683 | * or MMIO access, which can incur recursive frozen PE during PE | ||
684 | * reset. The side effect is that EEH core has to clear the frozen | ||
685 | * state explicitly after BAR restore. | ||
686 | */ | ||
687 | if (pe->type & EEH_PE_PHB) { | ||
688 | ret = ioda_eeh_phb_reset(hose, option); | ||
689 | } else { | ||
690 | struct pnv_phb *phb; | ||
691 | s64 rc; | ||
692 | |||
693 | /* | ||
694 | * The frozen PE might be caused by PAPR error injection | ||
695 | * registers, which are expected to be cleared after hitting | ||
696 | * frozen PE as stated in the hardware spec. Unfortunately, | ||
697 | * that's not true on P7IOC. So we have to clear it manually | ||
698 | * to avoid recursive EEH errors during recovery. | ||
699 | */ | ||
700 | phb = hose->private_data; | ||
701 | if (phb->model == PNV_PHB_MODEL_P7IOC && | ||
702 | (option == EEH_RESET_HOT || | ||
703 | option == EEH_RESET_FUNDAMENTAL)) { | ||
704 | rc = opal_pci_reset(phb->opal_id, | ||
705 | OPAL_RESET_PHB_ERROR, | ||
706 | OPAL_ASSERT_RESET); | ||
707 | if (rc != OPAL_SUCCESS) { | ||
708 | pr_warn("%s: Failure %lld clearing " | ||
709 | "error injection registers\n", | ||
710 | __func__, rc); | ||
711 | return -EIO; | ||
712 | } | ||
713 | } | ||
714 | |||
715 | bus = eeh_pe_bus_get(pe); | ||
716 | if (pci_is_root_bus(bus) || | ||
717 | pci_is_root_bus(bus->parent)) | ||
718 | ret = ioda_eeh_root_reset(hose, option); | ||
719 | else | ||
720 | ret = ioda_eeh_bridge_reset(bus->self, option); | ||
721 | } | ||
722 | |||
723 | return ret; | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * ioda_eeh_get_log - Retrieve error log | ||
728 | * @pe: frozen PE | ||
729 | * @severity: permanent or temporary error | ||
730 | * @drv_log: device driver log | ||
731 | * @len: length of device driver log | ||
732 | * | ||
733 | * Retrieve error log, which contains log from device driver | ||
734 | * and firmware. | ||
735 | */ | ||
736 | static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, | ||
737 | char *drv_log, unsigned long len) | ||
738 | { | ||
739 | if (!eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
740 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | ||
741 | |||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | /** | ||
746 | * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE | ||
747 | * @pe: EEH PE | ||
748 | * | ||
749 | * For particular PE, it might have included PCI bridges. In order | ||
750 | * to make the PE work properly, those PCI bridges should be configured | ||
751 | * correctly. However, we need do nothing on P7IOC since the reset | ||
752 | * function will do everything that should be covered by the function. | ||
753 | */ | ||
754 | static int ioda_eeh_configure_bridge(struct eeh_pe *pe) | ||
755 | { | ||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | static int ioda_eeh_err_inject(struct eeh_pe *pe, int type, int func, | ||
760 | unsigned long addr, unsigned long mask) | ||
761 | { | ||
762 | struct pci_controller *hose = pe->phb; | ||
763 | struct pnv_phb *phb = hose->private_data; | ||
764 | s64 ret; | ||
765 | |||
766 | /* Sanity check on error type */ | ||
767 | if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR && | ||
768 | type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) { | ||
769 | pr_warn("%s: Invalid error type %d\n", | ||
770 | __func__, type); | ||
771 | return -ERANGE; | ||
772 | } | ||
773 | |||
774 | if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR || | ||
775 | func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) { | ||
776 | pr_warn("%s: Invalid error function %d\n", | ||
777 | __func__, func); | ||
778 | return -ERANGE; | ||
779 | } | ||
780 | |||
781 | /* Firmware supports error injection ? */ | ||
782 | if (!opal_check_token(OPAL_PCI_ERR_INJECT)) { | ||
783 | pr_warn("%s: Firmware doesn't support error injection\n", | ||
784 | __func__); | ||
785 | return -ENXIO; | ||
786 | } | ||
787 | |||
788 | /* Do error injection */ | ||
789 | ret = opal_pci_err_inject(phb->opal_id, pe->addr, | ||
790 | type, func, addr, mask); | ||
791 | if (ret != OPAL_SUCCESS) { | ||
792 | pr_warn("%s: Failure %lld injecting error " | ||
793 | "%d-%d to PHB#%x-PE#%x\n", | ||
794 | __func__, ret, type, func, | ||
795 | hose->global_number, pe->addr); | ||
796 | return -EIO; | ||
797 | } | ||
798 | |||
799 | return 0; | ||
800 | } | ||
801 | |||
802 | static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) | ||
803 | { | ||
804 | /* GEM */ | ||
805 | if (data->gemXfir || data->gemRfir || | ||
806 | data->gemRirqfir || data->gemMask || data->gemRwof) | ||
807 | pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n", | ||
808 | be64_to_cpu(data->gemXfir), | ||
809 | be64_to_cpu(data->gemRfir), | ||
810 | be64_to_cpu(data->gemRirqfir), | ||
811 | be64_to_cpu(data->gemMask), | ||
812 | be64_to_cpu(data->gemRwof)); | ||
813 | |||
814 | /* LEM */ | ||
815 | if (data->lemFir || data->lemErrMask || | ||
816 | data->lemAction0 || data->lemAction1 || data->lemWof) | ||
817 | pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n", | ||
818 | be64_to_cpu(data->lemFir), | ||
819 | be64_to_cpu(data->lemErrMask), | ||
820 | be64_to_cpu(data->lemAction0), | ||
821 | be64_to_cpu(data->lemAction1), | ||
822 | be64_to_cpu(data->lemWof)); | ||
823 | } | ||
824 | |||
825 | static void ioda_eeh_hub_diag(struct pci_controller *hose) | ||
826 | { | ||
827 | struct pnv_phb *phb = hose->private_data; | ||
828 | struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; | ||
829 | long rc; | ||
830 | |||
831 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); | ||
832 | if (rc != OPAL_SUCCESS) { | ||
833 | pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n", | ||
834 | __func__, phb->hub_id, rc); | ||
835 | return; | ||
836 | } | ||
837 | |||
838 | switch (data->type) { | ||
839 | case OPAL_P7IOC_DIAG_TYPE_RGC: | ||
840 | pr_info("P7IOC diag-data for RGC\n\n"); | ||
841 | ioda_eeh_hub_diag_common(data); | ||
842 | if (data->rgc.rgcStatus || data->rgc.rgcLdcp) | ||
843 | pr_info(" RGC: %016llx %016llx\n", | ||
844 | be64_to_cpu(data->rgc.rgcStatus), | ||
845 | be64_to_cpu(data->rgc.rgcLdcp)); | ||
846 | break; | ||
847 | case OPAL_P7IOC_DIAG_TYPE_BI: | ||
848 | pr_info("P7IOC diag-data for BI %s\n\n", | ||
849 | data->bi.biDownbound ? "Downbound" : "Upbound"); | ||
850 | ioda_eeh_hub_diag_common(data); | ||
851 | if (data->bi.biLdcp0 || data->bi.biLdcp1 || | ||
852 | data->bi.biLdcp2 || data->bi.biFenceStatus) | ||
853 | pr_info(" BI: %016llx %016llx %016llx %016llx\n", | ||
854 | be64_to_cpu(data->bi.biLdcp0), | ||
855 | be64_to_cpu(data->bi.biLdcp1), | ||
856 | be64_to_cpu(data->bi.biLdcp2), | ||
857 | be64_to_cpu(data->bi.biFenceStatus)); | ||
858 | break; | ||
859 | case OPAL_P7IOC_DIAG_TYPE_CI: | ||
860 | pr_info("P7IOC diag-data for CI Port %d\n\n", | ||
861 | data->ci.ciPort); | ||
862 | ioda_eeh_hub_diag_common(data); | ||
863 | if (data->ci.ciPortStatus || data->ci.ciPortLdcp) | ||
864 | pr_info(" CI: %016llx %016llx\n", | ||
865 | be64_to_cpu(data->ci.ciPortStatus), | ||
866 | be64_to_cpu(data->ci.ciPortLdcp)); | ||
867 | break; | ||
868 | case OPAL_P7IOC_DIAG_TYPE_MISC: | ||
869 | pr_info("P7IOC diag-data for MISC\n\n"); | ||
870 | ioda_eeh_hub_diag_common(data); | ||
871 | break; | ||
872 | case OPAL_P7IOC_DIAG_TYPE_I2C: | ||
873 | pr_info("P7IOC diag-data for I2C\n\n"); | ||
874 | ioda_eeh_hub_diag_common(data); | ||
875 | break; | ||
876 | default: | ||
877 | pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n", | ||
878 | __func__, phb->hub_id, data->type); | ||
879 | } | ||
880 | } | ||
881 | |||
882 | static int ioda_eeh_get_pe(struct pci_controller *hose, | ||
883 | u16 pe_no, struct eeh_pe **pe) | ||
884 | { | ||
885 | struct pnv_phb *phb = hose->private_data; | ||
886 | struct pnv_ioda_pe *pnv_pe; | ||
887 | struct eeh_pe *dev_pe; | ||
888 | struct eeh_dev edev; | ||
889 | |||
890 | /* | ||
891 | * If PHB supports compound PE, to fetch | ||
892 | * the master PE because slave PE is invisible | ||
893 | * to EEH core. | ||
894 | */ | ||
895 | pnv_pe = &phb->ioda.pe_array[pe_no]; | ||
896 | if (pnv_pe->flags & PNV_IODA_PE_SLAVE) { | ||
897 | pnv_pe = pnv_pe->master; | ||
898 | WARN_ON(!pnv_pe || | ||
899 | !(pnv_pe->flags & PNV_IODA_PE_MASTER)); | ||
900 | pe_no = pnv_pe->pe_number; | ||
901 | } | ||
902 | |||
903 | /* Find the PE according to PE# */ | ||
904 | memset(&edev, 0, sizeof(struct eeh_dev)); | ||
905 | edev.phb = hose; | ||
906 | edev.pe_config_addr = pe_no; | ||
907 | dev_pe = eeh_pe_get(&edev); | ||
908 | if (!dev_pe) | ||
909 | return -EEXIST; | ||
910 | |||
911 | /* Freeze the (compound) PE */ | ||
912 | *pe = dev_pe; | ||
913 | if (!(dev_pe->state & EEH_PE_ISOLATED)) | ||
914 | phb->freeze_pe(phb, pe_no); | ||
915 | |||
916 | /* | ||
917 | * At this point, we're sure the (compound) PE should | ||
918 | * have been frozen. However, we still need to poke until | ||
919 | * hitting the frozen PE on top level. | ||
920 | */ | ||
921 | dev_pe = dev_pe->parent; | ||
922 | while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) { | ||
923 | int ret; | ||
924 | int active_flags = (EEH_STATE_MMIO_ACTIVE | | ||
925 | EEH_STATE_DMA_ACTIVE); | ||
926 | |||
927 | ret = eeh_ops->get_state(dev_pe, NULL); | ||
928 | if (ret <= 0 || (ret & active_flags) == active_flags) { | ||
929 | dev_pe = dev_pe->parent; | ||
930 | continue; | ||
931 | } | ||
932 | |||
933 | /* Frozen parent PE */ | ||
934 | *pe = dev_pe; | ||
935 | if (!(dev_pe->state & EEH_PE_ISOLATED)) | ||
936 | phb->freeze_pe(phb, dev_pe->addr); | ||
937 | |||
938 | /* Next one */ | ||
939 | dev_pe = dev_pe->parent; | ||
940 | } | ||
941 | |||
942 | return 0; | ||
943 | } | ||
944 | |||
945 | /** | ||
946 | * ioda_eeh_next_error - Retrieve next error for EEH core to handle | ||
947 | * @pe: The affected PE | ||
948 | * | ||
949 | * The function is expected to be called by EEH core while it gets | ||
950 | * special EEH event (without binding PE). The function calls to | ||
951 | * OPAL APIs for next error to handle. The informational error is | ||
952 | * handled internally by platform. However, the dead IOC, dead PHB, | ||
953 | * fenced PHB and frozen PE should be handled by EEH core eventually. | ||
954 | */ | ||
955 | static int ioda_eeh_next_error(struct eeh_pe **pe) | ||
956 | { | ||
957 | struct pci_controller *hose; | ||
958 | struct pnv_phb *phb; | ||
959 | struct eeh_pe *phb_pe, *parent_pe; | ||
960 | __be64 frozen_pe_no; | ||
961 | __be16 err_type, severity; | ||
962 | int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); | ||
963 | long rc; | ||
964 | int state, ret = EEH_NEXT_ERR_NONE; | ||
965 | |||
966 | /* | ||
967 | * While running here, it's safe to purge the event queue. | ||
969 | * And we should keep the cached OPAL notifier event synchronized | ||
969 | * between the kernel and firmware. | ||
970 | */ | ||
971 | eeh_remove_event(NULL, false); | ||
972 | opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); | ||
973 | |||
974 | list_for_each_entry(hose, &hose_list, list_node) { | ||
975 | /* | ||
976 | * If the subordinate PCI buses of the PHB have been | ||
977 | * removed or are undergoing error recovery, we | ||
978 | * needn't take care of them any more. | ||
979 | */ | ||
980 | phb = hose->private_data; | ||
981 | phb_pe = eeh_phb_pe_get(hose); | ||
982 | if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED)) | ||
983 | continue; | ||
984 | |||
985 | rc = opal_pci_next_error(phb->opal_id, | ||
986 | &frozen_pe_no, &err_type, &severity); | ||
987 | |||
988 | /* If OPAL API returns error, we needn't proceed */ | ||
989 | if (rc != OPAL_SUCCESS) { | ||
990 | pr_devel("%s: Invalid return value on " | ||
991 | "PHB#%x (0x%lx) from opal_pci_next_error", | ||
992 | __func__, hose->global_number, rc); | ||
993 | continue; | ||
994 | } | ||
995 | |||
996 | /* If the PHB doesn't have error, stop processing */ | ||
997 | if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR || | ||
998 | be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) { | ||
999 | pr_devel("%s: No error found on PHB#%x\n", | ||
1000 | __func__, hose->global_number); | ||
1001 | continue; | ||
1002 | } | ||
1003 | |||
1004 | /* | ||
1005 | * Processing the error. We're expecting the error with | ||
1006 | * highest priority reported upon multiple errors on the | ||
1007 | * specific PHB. | ||
1008 | */ | ||
1009 | pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", | ||
1010 | __func__, be16_to_cpu(err_type), be16_to_cpu(severity), | ||
1011 | be64_to_cpu(frozen_pe_no), hose->global_number); | ||
1012 | switch (be16_to_cpu(err_type)) { | ||
1013 | case OPAL_EEH_IOC_ERROR: | ||
1014 | if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) { | ||
1015 | pr_err("EEH: dead IOC detected\n"); | ||
1016 | ret = EEH_NEXT_ERR_DEAD_IOC; | ||
1017 | } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { | ||
1018 | pr_info("EEH: IOC informative error " | ||
1019 | "detected\n"); | ||
1020 | ioda_eeh_hub_diag(hose); | ||
1021 | ret = EEH_NEXT_ERR_NONE; | ||
1022 | } | ||
1023 | |||
1024 | break; | ||
1025 | case OPAL_EEH_PHB_ERROR: | ||
1026 | if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) { | ||
1027 | *pe = phb_pe; | ||
1028 | pr_err("EEH: dead PHB#%x detected, " | ||
1029 | "location: %s\n", | ||
1030 | hose->global_number, | ||
1031 | eeh_pe_loc_get(phb_pe)); | ||
1032 | ret = EEH_NEXT_ERR_DEAD_PHB; | ||
1033 | } else if (be16_to_cpu(severity) == | ||
1034 | OPAL_EEH_SEV_PHB_FENCED) { | ||
1035 | *pe = phb_pe; | ||
1036 | pr_err("EEH: Fenced PHB#%x detected, " | ||
1037 | "location: %s\n", | ||
1038 | hose->global_number, | ||
1039 | eeh_pe_loc_get(phb_pe)); | ||
1040 | ret = EEH_NEXT_ERR_FENCED_PHB; | ||
1041 | } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { | ||
1042 | pr_info("EEH: PHB#%x informative error " | ||
1043 | "detected, location: %s\n", | ||
1044 | hose->global_number, | ||
1045 | eeh_pe_loc_get(phb_pe)); | ||
1046 | ioda_eeh_phb_diag(phb_pe); | ||
1047 | pnv_pci_dump_phb_diag_data(hose, phb_pe->data); | ||
1048 | ret = EEH_NEXT_ERR_NONE; | ||
1049 | } | ||
1050 | |||
1051 | break; | ||
1052 | case OPAL_EEH_PE_ERROR: | ||
1053 | /* | ||
1054 | * If we can't find the corresponding PE, we | ||
1055 | * just try to unfreeze. | ||
1056 | */ | ||
1057 | if (ioda_eeh_get_pe(hose, | ||
1058 | be64_to_cpu(frozen_pe_no), pe)) { | ||
1059 | /* Try best to clear it */ | ||
1060 | pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n", | ||
1061 | hose->global_number, frozen_pe_no); | ||
1062 | pr_info("EEH: PHB location: %s\n", | ||
1063 | eeh_pe_loc_get(phb_pe)); | ||
1064 | opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no, | ||
1065 | OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); | ||
1066 | ret = EEH_NEXT_ERR_NONE; | ||
1067 | } else if ((*pe)->state & EEH_PE_ISOLATED || | ||
1068 | eeh_pe_passed(*pe)) { | ||
1069 | ret = EEH_NEXT_ERR_NONE; | ||
1070 | } else { | ||
1071 | pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", | ||
1072 | (*pe)->addr, (*pe)->phb->global_number); | ||
1073 | pr_err("EEH: PE location: %s, PHB location: %s\n", | ||
1074 | eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe)); | ||
1075 | ret = EEH_NEXT_ERR_FROZEN_PE; | ||
1076 | } | ||
1077 | |||
1078 | break; | ||
1079 | default: | ||
1080 | pr_warn("%s: Unexpected error type %d\n", | ||
1081 | __func__, be16_to_cpu(err_type)); | ||
1082 | } | ||
1083 | |||
1084 | /* | ||
1085 | * EEH core will try recover from fenced PHB or | ||
1086 | * frozen PE. In the time for frozen PE, EEH core | ||
1087 | * enable IO path for that before collecting logs, | ||
1088 | * but it ruins the site. So we have to dump the | ||
1089 | * log in advance here. | ||
1090 | */ | ||
1091 | if ((ret == EEH_NEXT_ERR_FROZEN_PE || | ||
1092 | ret == EEH_NEXT_ERR_FENCED_PHB) && | ||
1093 | !((*pe)->state & EEH_PE_ISOLATED)) { | ||
1094 | eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); | ||
1095 | ioda_eeh_phb_diag(*pe); | ||
1096 | |||
1097 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
1098 | pnv_pci_dump_phb_diag_data((*pe)->phb, | ||
1099 | (*pe)->data); | ||
1100 | } | ||
1101 | |||
1102 | /* | ||
1103 | * We probably have the frozen parent PE out there and | ||
1104 | * we have to handle the frozen parent PE first. | ||
1105 | */ | ||
1106 | if (ret == EEH_NEXT_ERR_FROZEN_PE) { | ||
1107 | parent_pe = (*pe)->parent; | ||
1108 | while (parent_pe) { | ||
1109 | /* Hit the ceiling ? */ | ||
1110 | if (parent_pe->type & EEH_PE_PHB) | ||
1111 | break; | ||
1112 | |||
1113 | /* Frozen parent PE ? */ | ||
1114 | state = ioda_eeh_get_state(parent_pe); | ||
1115 | if (state > 0 && | ||
1116 | (state & active_flags) != active_flags) | ||
1117 | *pe = parent_pe; | ||
1118 | |||
1119 | /* Next parent level */ | ||
1120 | parent_pe = parent_pe->parent; | ||
1121 | } | ||
1122 | |||
1123 | /* We possibly migrate to another PE */ | ||
1124 | eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); | ||
1125 | } | ||
1126 | |||
1127 | /* | ||
1128 | * If we have no errors on the specific PHB or only | ||
1129 | * informative error there, we continue poking it. | ||
1130 | * Otherwise, we need actions to be taken by upper | ||
1131 | * layer. | ||
1132 | */ | ||
1133 | if (ret > EEH_NEXT_ERR_INF) | ||
1134 | break; | ||
1135 | } | ||
1136 | |||
1137 | return ret; | ||
1138 | } | ||
1139 | |||
1140 | struct pnv_eeh_ops ioda_eeh_ops = { | ||
1141 | .post_init = ioda_eeh_post_init, | ||
1142 | .set_option = ioda_eeh_set_option, | ||
1143 | .get_state = ioda_eeh_get_state, | ||
1144 | .reset = ioda_eeh_reset, | ||
1145 | .get_log = ioda_eeh_get_log, | ||
1146 | .configure_bridge = ioda_eeh_configure_bridge, | ||
1147 | .err_inject = ioda_eeh_err_inject, | ||
1148 | .next_error = ioda_eeh_next_error | ||
1149 | }; | ||