author     Linus Torvalds <torvalds@g5.osdl.org>   2006-01-10 11:28:32 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-01-10 11:28:32 -0500
commit     a62e68488dd5ddb07776555fd7e0435c6d021ac1 (patch)
tree       d6cec15baa1ddfee108ef77b315dfdea5e3fa71c /arch
parent     ab396e91bfe953db26fa1083d9c3e7a4fbe0334a (diff)
parent     3b212db9217d02e623eaa12f41c9b5f8c6a99535 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig                        |  11
-rw-r--r--  arch/powerpc/kernel/Makefile                |   2
-rw-r--r--  arch/powerpc/kernel/pci_64.c                |   2
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c             |   1
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c              |   2
-rw-r--r--  arch/powerpc/kernel/setup_32.c              |   2
-rw-r--r--  arch/powerpc/kernel/setup_64.c              |   2
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c             |  24
-rw-r--r--  arch/powerpc/kernel/systbl.S                |   6
-rw-r--r--  arch/powerpc/mm/pgtable_64.c                |   2
-rw-r--r--  arch/powerpc/platforms/maple/setup.c        |   3
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile     |   2
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c        | 489
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c  | 316
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 376
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_event.c  |  39
16 files changed, 854 insertions(+), 425 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 28004f002ec9..935d96571515 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -275,6 +275,7 @@ config PPC_PSERIES
 	select PPC_I8259
 	select PPC_RTAS
 	select RTAS_ERROR_LOGGING
+	select PPC_UDBG_16550
 	default y
 
 config PPC_CHRP
@@ -284,6 +285,7 @@ config PPC_CHRP
 	select PPC_INDIRECT_PCI
 	select PPC_RTAS
 	select PPC_MPC106
+	select PPC_UDBG_16550
 	default y
 
 config PPC_PMAC
@@ -306,6 +308,7 @@ config PPC_PREP
 	depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
+	select PPC_UDBG_16550
 	default y
 
 config PPC_MAPLE
@@ -314,6 +317,7 @@ config PPC_MAPLE
 	select U3_DART
 	select MPIC_BROKEN_U3
 	select GENERIC_TBSYNC
+	select PPC_UDBG_16550
 	default n
 	help
 	  This option enables support for the Maple 970FX Evaluation Board.
@@ -324,6 +328,7 @@ config PPC_CELL
 	depends on PPC_MULTIPLATFORM && PPC64
 	select PPC_RTAS
 	select MMIO_NVRAM
+	select PPC_UDBG_16550
 
 config PPC_OF
 	def_bool y
@@ -370,6 +375,10 @@ config MPIC_BROKEN_U3
 	depends on PPC_MAPLE
 	default y
 
+config PPC_UDBG_16550
+	bool
+	default n
+
 config CELL_IIC
 	depends on PPC_CELL
 	bool
@@ -403,7 +412,7 @@ config PPC_MPC106
 
 config GENERIC_TBSYNC
 	bool
-	default y if CONFIG_PPC32 && CONFIG_SMP
+	default y if PPC32 && SMP
 	default n
 
 source "drivers/cpufreq/Kconfig"
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 17ed5018288b..144e284d21dd 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -54,7 +54,7 @@ obj-$(CONFIG_BOOTX_TEXT) += btext.o
 obj-$(CONFIG_6xx) += idle_6xx.o
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_SERIAL_8250) += legacy_serial.o udbg_16550.o
+obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
 module-$(CONFIG_PPC64) += module_64.o
 obj-$(CONFIG_MODULES) += $(module-y)
 
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index fc60a773af7d..ba21a6c4f467 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -381,7 +381,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
 	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
 
-	dev->cfg_size = 256; /*pci_cfg_space_size(dev);*/
+	dev->cfg_size = pci_cfg_space_size(dev);
 
 	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
 		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
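The one-line change above stops pinning dev->cfg_size at 256 bytes and asks the PCI core for the device's real configuration-space size. In the kernel, pci_cfg_space_size() reports 256 bytes for conventional PCI devices and 4096 bytes when extended configuration space (PCI-X 2.0 / PCI Express) is usable. The stand-alone model below only illustrates that distinction; the lookup function is an invented stand-in, not the kernel's probing logic.

/* Illustrative only: cfg_size now reflects the device's real config-space
 * size instead of being hard-coded to 256 bytes.  The size constants match
 * the PCI spec; cfg_space_size_model() is a stand-in, not pci_cfg_space_size(). */
#include <stdio.h>

#define PCI_CFG_SPACE_SIZE      256     /* conventional PCI */
#define PCI_CFG_SPACE_EXP_SIZE  4096    /* PCI-X 2.0 / PCI Express extended space */

static int cfg_space_size_model(int has_extended_cfg)
{
        return has_extended_cfg ? PCI_CFG_SPACE_EXP_SIZE : PCI_CFG_SPACE_SIZE;
}

int main(void)
{
        printf("legacy device: %d bytes\n", cfg_space_size_model(0));
        printf("PCIe device:   %d bytes\n", cfg_space_size_model(1));
        return 0;
}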
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index b2758148a0de..16d9a904f3cb 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -244,7 +244,6 @@ EXPORT_SYMBOL(set_context);
 extern long mol_trampoline;
 EXPORT_SYMBOL(mol_trampoline); /* For MOL */
 EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
-EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
 #ifdef CONFIG_SMP
 extern int mmu_hash_lock;
 EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 45b8109951fe..5579f6559912 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -72,7 +72,7 @@ static int of_device_available(struct device_node * dn)
 		return 0;
 }
 
-static int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
+int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
 {
 	int returnval = -1;
 	unsigned long buid, addr;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index e5d285adb496..db72a92943bf 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -299,9 +299,7 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.init_early)
 		ppc_md.init_early();
 
-#ifdef CONFIG_SERIAL_8250
 	find_legacy_serial_ports();
-#endif
 	finish_device_tree();
 
 	smp_setup_cpu_maps();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 81567e931260..c4b76961d6de 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -472,9 +472,7 @@ void __init setup_system(void)
 	 * hash table management for us, thus ioremap works. We do that early
 	 * so that further code can be debugged
 	 */
-#ifdef CONFIG_SERIAL_8250
 	find_legacy_serial_ports();
-#endif
 
 	/*
 	 * "Finish" the device-tree, that is do the actual parsing of
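With the two hunks above, both the 32-bit and 64-bit setup paths call find_legacy_serial_ports() unconditionally; the code that implements it is built whenever a platform selects PPC_UDBG_16550 (see the Kconfig and Makefile hunks earlier in this diff). For the unconditional call to compile when that option is off, a no-op stub has to exist in a header; that header is not part of this diff, so the stand-alone sketch below only models the usual compile-time pattern under that assumption.

/* Sketch of the stub pattern that lets callers drop their #ifdefs.
 * The names mirror the kernel code, but this is a userspace model and the
 * exact kernel header providing the stub is an assumption, not shown here.
 * Build with or without -DCONFIG_PPC_UDBG_16550 to see both cases. */
#include <stdio.h>

#ifdef CONFIG_PPC_UDBG_16550
static void find_legacy_serial_ports(void)
{
        printf("probing legacy serial ports\n");
}
#else
/* option off: the call compiles away to nothing */
#define find_legacy_serial_ports() do { } while (0)
#endif

int main(void)
{
        /* callers such as setup_arch()/setup_system() need no #ifdef */
        find_legacy_serial_ports();
        return 0;
}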
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 0ee44be4ab73..475249dc2350 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -552,30 +552,6 @@ asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec
 	return ret;
 }
 
-asmlinkage int compat_sys_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
-{
-	return sys_pciconfig_read((unsigned long) bus,
-			(unsigned long) dfn,
-			(unsigned long) off,
-			(unsigned long) len,
-			compat_ptr(ubuf));
-}
-
-asmlinkage int compat_sys_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
-{
-	return sys_pciconfig_write((unsigned long) bus,
-			(unsigned long) dfn,
-			(unsigned long) off,
-			(unsigned long) len,
-			compat_ptr(ubuf));
-}
-
-asmlinkage int compat_sys_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
-{
-	return sys_pciconfig_iobase(which, in_bus, in_devfn);
-}
-
-
 /* Note: it is necessary to treat mode as an unsigned int,
  * with the corresponding cast to a signed int to insure that the
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index dd2ab85e3513..68013179a503 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -239,9 +239,9 @@ SYS32ONLY(ftruncate64)
 SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
 SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
 SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
-COMPAT_SYS(pciconfig_read)
-COMPAT_SYS(pciconfig_write)
-COMPAT_SYS(pciconfig_iobase)
+SYSCALL(pciconfig_read)
+SYSCALL(pciconfig_write)
+SYSCALL(pciconfig_iobase)
 SYSCALL(ni_syscall)
 SYSCALL(getdents64)
 SYSCALL(pivot_root)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 2ffca63602c5..7b278d83739e 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -174,7 +174,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
 	pa = addr & PAGE_MASK;
 	size = PAGE_ALIGN(addr + size) - pa;
 
-	if (size == 0)
+	if ((size == 0) || (pa == 0))
 		return NULL;
 
 	if (mem_init_done) {
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index dd73e38bfb7d..a1cb4d236720 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -71,9 +71,6 @@
 #define DBG(fmt...)
 #endif
 
-extern void generic_find_legacy_serial_ports(u64 *physport,
-			unsigned int *default_speed);
-
 static void maple_restart(char *cmd)
 {
 	unsigned int maple_nvram_base;
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 6accdd155505..61616d144072 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_IBMVIO) += vio.o
 obj-$(CONFIG_XICS) += xics.o
 obj-$(CONFIG_SCANLOG) += scanlog.o
-obj-$(CONFIG_EEH) += eeh.o eeh_event.o
+obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o
 
 obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
 obj-$(CONFIG_HVCS) += hvcserver.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 7fbfd16d72b7..17cea7f2afd3 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -76,15 +76,14 @@
  */
 #define EEH_MAX_FAILS	100000
 
-/* Misc forward declaraions */
-static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn);
-
 /* RTAS tokens */
 static int ibm_set_eeh_option;
 static int ibm_set_slot_reset;
 static int ibm_read_slot_reset_state;
 static int ibm_read_slot_reset_state2;
 static int ibm_slot_error_detail;
+static int ibm_get_config_addr_info;
+static int ibm_configure_bridge;
 
 int eeh_subsystem_enabled;
 EXPORT_SYMBOL(eeh_subsystem_enabled);
@@ -98,308 +97,23 @@ static DEFINE_SPINLOCK(slot_errbuf_lock);
98static int eeh_error_buf_size; 97static int eeh_error_buf_size;
99 98
100/* System monitoring statistics */ 99/* System monitoring statistics */
101static DEFINE_PER_CPU(unsigned long, no_device); 100static unsigned long no_device;
102static DEFINE_PER_CPU(unsigned long, no_dn); 101static unsigned long no_dn;
103static DEFINE_PER_CPU(unsigned long, no_cfg_addr); 102static unsigned long no_cfg_addr;
104static DEFINE_PER_CPU(unsigned long, ignored_check); 103static unsigned long ignored_check;
105static DEFINE_PER_CPU(unsigned long, total_mmio_ffs); 104static unsigned long total_mmio_ffs;
106static DEFINE_PER_CPU(unsigned long, false_positives); 105static unsigned long false_positives;
107static DEFINE_PER_CPU(unsigned long, ignored_failures); 106static unsigned long ignored_failures;
108static DEFINE_PER_CPU(unsigned long, slot_resets); 107static unsigned long slot_resets;
109
110/**
111 * The pci address cache subsystem. This subsystem places
112 * PCI device address resources into a red-black tree, sorted
113 * according to the address range, so that given only an i/o
114 * address, the corresponding PCI device can be **quickly**
115 * found. It is safe to perform an address lookup in an interrupt
116 * context; this ability is an important feature.
117 *
118 * Currently, the only customer of this code is the EEH subsystem;
119 * thus, this code has been somewhat tailored to suit EEH better.
120 * In particular, the cache does *not* hold the addresses of devices
121 * for which EEH is not enabled.
122 *
123 * (Implementation Note: The RB tree seems to be better/faster
124 * than any hash algo I could think of for this problem, even
125 * with the penalty of slow pointer chases for d-cache misses).
126 */
127struct pci_io_addr_range
128{
129 struct rb_node rb_node;
130 unsigned long addr_lo;
131 unsigned long addr_hi;
132 struct pci_dev *pcidev;
133 unsigned int flags;
134};
135
136static struct pci_io_addr_cache
137{
138 struct rb_root rb_root;
139 spinlock_t piar_lock;
140} pci_io_addr_cache_root;
141
142static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
143{
144 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
145
146 while (n) {
147 struct pci_io_addr_range *piar;
148 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
149
150 if (addr < piar->addr_lo) {
151 n = n->rb_left;
152 } else {
153 if (addr > piar->addr_hi) {
154 n = n->rb_right;
155 } else {
156 pci_dev_get(piar->pcidev);
157 return piar->pcidev;
158 }
159 }
160 }
161
162 return NULL;
163}
164
165/**
166 * pci_get_device_by_addr - Get device, given only address
167 * @addr: mmio (PIO) phys address or i/o port number
168 *
169 * Given an mmio phys address, or a port number, find a pci device
170 * that implements this address. Be sure to pci_dev_put the device
171 * when finished. I/O port numbers are assumed to be offset
172 * from zero (that is, they do *not* have pci_io_addr added in).
173 * It is safe to call this function within an interrupt.
174 */
175static struct pci_dev *pci_get_device_by_addr(unsigned long addr)
176{
177 struct pci_dev *dev;
178 unsigned long flags;
179
180 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
181 dev = __pci_get_device_by_addr(addr);
182 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
183 return dev;
184}
185
186#ifdef DEBUG
187/*
188 * Handy-dandy debug print routine, does nothing more
189 * than print out the contents of our addr cache.
190 */
191static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
192{
193 struct rb_node *n;
194 int cnt = 0;
195
196 n = rb_first(&cache->rb_root);
197 while (n) {
198 struct pci_io_addr_range *piar;
199 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
200 printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
201 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
202 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
203 cnt++;
204 n = rb_next(n);
205 }
206}
207#endif
208
209/* Insert address range into the rb tree. */
210static struct pci_io_addr_range *
211pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
212 unsigned long ahi, unsigned int flags)
213{
214 struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
215 struct rb_node *parent = NULL;
216 struct pci_io_addr_range *piar;
217
218 /* Walk tree, find a place to insert into tree */
219 while (*p) {
220 parent = *p;
221 piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
222 if (ahi < piar->addr_lo) {
223 p = &parent->rb_left;
224 } else if (alo > piar->addr_hi) {
225 p = &parent->rb_right;
226 } else {
227 if (dev != piar->pcidev ||
228 alo != piar->addr_lo || ahi != piar->addr_hi) {
229 printk(KERN_WARNING "PIAR: overlapping address range\n");
230 }
231 return piar;
232 }
233 }
234 piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
235 if (!piar)
236 return NULL;
237 108
238 piar->addr_lo = alo; 109#define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE)
239 piar->addr_hi = ahi;
240 piar->pcidev = dev;
241 piar->flags = flags;
242
243#ifdef DEBUG
244 printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
245 alo, ahi, pci_name (dev));
246#endif
247
248 rb_link_node(&piar->rb_node, parent, p);
249 rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
250
251 return piar;
252}
253
254static void __pci_addr_cache_insert_device(struct pci_dev *dev)
255{
256 struct device_node *dn;
257 struct pci_dn *pdn;
258 int i;
259 int inserted = 0;
260
261 dn = pci_device_to_OF_node(dev);
262 if (!dn) {
263 printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
264 return;
265 }
266
267 /* Skip any devices for which EEH is not enabled. */
268 pdn = PCI_DN(dn);
269 if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
270 pdn->eeh_mode & EEH_MODE_NOCHECK) {
271#ifdef DEBUG
272 printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
273 pci_name(dev), pdn->node->full_name);
274#endif
275 return;
276 }
277
278 /* The cache holds a reference to the device... */
279 pci_dev_get(dev);
280
281 /* Walk resources on this device, poke them into the tree */
282 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
283 unsigned long start = pci_resource_start(dev,i);
284 unsigned long end = pci_resource_end(dev,i);
285 unsigned int flags = pci_resource_flags(dev,i);
286
287 /* We are interested only bus addresses, not dma or other stuff */
288 if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
289 continue;
290 if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
291 continue;
292 pci_addr_cache_insert(dev, start, end, flags);
293 inserted = 1;
294 }
295
296 /* If there was nothing to add, the cache has no reference... */
297 if (!inserted)
298 pci_dev_put(dev);
299}
300
301/**
302 * pci_addr_cache_insert_device - Add a device to the address cache
303 * @dev: PCI device whose I/O addresses we are interested in.
304 *
305 * In order to support the fast lookup of devices based on addresses,
306 * we maintain a cache of devices that can be quickly searched.
307 * This routine adds a device to that cache.
308 */
309static void pci_addr_cache_insert_device(struct pci_dev *dev)
310{
311 unsigned long flags;
312
313 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
314 __pci_addr_cache_insert_device(dev);
315 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
316}
317
318static inline void __pci_addr_cache_remove_device(struct pci_dev *dev)
319{
320 struct rb_node *n;
321 int removed = 0;
322
323restart:
324 n = rb_first(&pci_io_addr_cache_root.rb_root);
325 while (n) {
326 struct pci_io_addr_range *piar;
327 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
328
329 if (piar->pcidev == dev) {
330 rb_erase(n, &pci_io_addr_cache_root.rb_root);
331 removed = 1;
332 kfree(piar);
333 goto restart;
334 }
335 n = rb_next(n);
336 }
337
338 /* The cache no longer holds its reference to this device... */
339 if (removed)
340 pci_dev_put(dev);
341}
342
343/**
344 * pci_addr_cache_remove_device - remove pci device from addr cache
345 * @dev: device to remove
346 *
347 * Remove a device from the addr-cache tree.
348 * This is potentially expensive, since it will walk
349 * the tree multiple times (once per resource).
350 * But so what; device removal doesn't need to be that fast.
351 */
352static void pci_addr_cache_remove_device(struct pci_dev *dev)
353{
354 unsigned long flags;
355
356 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
357 __pci_addr_cache_remove_device(dev);
358 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
359}
360
361/**
362 * pci_addr_cache_build - Build a cache of I/O addresses
363 *
364 * Build a cache of pci i/o addresses. This cache will be used to
365 * find the pci device that corresponds to a given address.
366 * This routine scans all pci busses to build the cache.
367 * Must be run late in boot process, after the pci controllers
368 * have been scaned for devices (after all device resources are known).
369 */
370void __init pci_addr_cache_build(void)
371{
372 struct device_node *dn;
373 struct pci_dev *dev = NULL;
374
375 if (!eeh_subsystem_enabled)
376 return;
377
378 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
379
380 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
381 /* Ignore PCI bridges ( XXX why ??) */
382 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
383 continue;
384 }
385 pci_addr_cache_insert_device(dev);
386
387 /* Save the BAR's; firmware doesn't restore these after EEH reset */
388 dn = pci_device_to_OF_node(dev);
389 eeh_save_bars(dev, PCI_DN(dn));
390 }
391
392#ifdef DEBUG
393 /* Verify tree built up above, echo back the list of addrs. */
394 pci_addr_cache_print(&pci_io_addr_cache_root);
395#endif
396}
397 110
398/* --------------------------------------------------------------- */ 111/* --------------------------------------------------------------- */
399/* Above lies the PCI Address Cache. Below lies the EEH event infrastructure */ 112/* Below lies the EEH event infrastructure */
400 113
401void eeh_slot_error_detail (struct pci_dn *pdn, int severity) 114void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
402{ 115{
116 int config_addr;
403 unsigned long flags; 117 unsigned long flags;
404 int rc; 118 int rc;
405 119
@@ -407,8 +121,13 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
 	spin_lock_irqsave(&slot_errbuf_lock, flags);
 	memset(slot_errbuf, 0, eeh_error_buf_size);
 
+	/* Use PE configuration address, if present */
+	config_addr = pdn->eeh_config_addr;
+	if (pdn->eeh_pe_config_addr)
+		config_addr = pdn->eeh_pe_config_addr;
+
 	rc = rtas_call(ibm_slot_error_detail,
-		       8, 1, NULL, pdn->eeh_config_addr,
+		       8, 1, NULL, config_addr,
 		       BUID_HI(pdn->phb->buid),
 		       BUID_LO(pdn->phb->buid), NULL, 0,
 		       virt_to_phys(slot_errbuf),
@@ -428,6 +147,7 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
 static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
 {
 	int token, outputs;
+	int config_addr;
 
 	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
 		token = ibm_read_slot_reset_state2;
@@ -438,7 +158,12 @@ static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
 		outputs = 3;
 	}
 
-	return rtas_call(token, 3, outputs, rets, pdn->eeh_config_addr,
+	/* Use PE configuration address, if present */
+	config_addr = pdn->eeh_config_addr;
+	if (pdn->eeh_pe_config_addr)
+		config_addr = pdn->eeh_pe_config_addr;
+
+	return rtas_call(token, 3, outputs, rets, config_addr,
 		       BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
 }
 
@@ -462,7 +187,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
 /**
  * Return the "partitionable endpoint" (pe) under which this device lies
  */
-static struct device_node * find_device_pe(struct device_node *dn)
+struct device_node * find_device_pe(struct device_node *dn)
 {
 	while ((dn->parent) && PCI_DN(dn->parent) &&
 	      (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
@@ -485,6 +210,11 @@ static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
 	if (PCI_DN(dn)) {
 		PCI_DN(dn)->eeh_mode |= mode_flag;
 
+		/* Mark the pci device driver too */
+		struct pci_dev *dev = PCI_DN(dn)->pcidev;
+		if (dev && dev->driver)
+			dev->error_state = pci_channel_io_frozen;
+
 		if (dn->child)
 			__eeh_mark_slot (dn->child, mode_flag);
 	}
@@ -495,6 +225,11 @@ static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
 void eeh_mark_slot (struct device_node *dn, int mode_flag)
 {
 	dn = find_device_pe (dn);
+
+	/* Back up one, since config addrs might be shared */
+	if (PCI_DN(dn) && PCI_DN(dn)->eeh_pe_config_addr)
+		dn = dn->parent;
+
 	PCI_DN(dn)->eeh_mode |= mode_flag;
 	__eeh_mark_slot (dn->child, mode_flag);
 }
@@ -516,7 +251,13 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&confirm_error_lock, flags);
+
 	dn = find_device_pe (dn);
+
+	/* Back up one, since config addrs might be shared */
+	if (PCI_DN(dn) && PCI_DN(dn)->eeh_pe_config_addr)
+		dn = dn->parent;
+
 	PCI_DN(dn)->eeh_mode &= ~mode_flag;
 	PCI_DN(dn)->eeh_check_count = 0;
 	__eeh_clear_slot (dn->child, mode_flag);
@@ -544,15 +285,16 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	int rets[3];
 	unsigned long flags;
 	struct pci_dn *pdn;
+	enum pci_channel_state state;
 	int rc = 0;
 
-	__get_cpu_var(total_mmio_ffs)++;
+	total_mmio_ffs++;
 
 	if (!eeh_subsystem_enabled)
 		return 0;
 
 	if (!dn) {
-		__get_cpu_var(no_dn)++;
+		no_dn++;
 		return 0;
 	}
 	pdn = PCI_DN(dn);
@@ -560,7 +302,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	/* Access to IO BARs might get this far and still not want checking. */
 	if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
 	    pdn->eeh_mode & EEH_MODE_NOCHECK) {
-		__get_cpu_var(ignored_check)++;
+		ignored_check++;
 #ifdef DEBUG
 		printk ("EEH:ignored check (%x) for %s %s\n",
 			pdn->eeh_mode, pci_name (dev), dn->full_name);
@@ -568,8 +310,8 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 		return 0;
 	}
 
-	if (!pdn->eeh_config_addr) {
-		__get_cpu_var(no_cfg_addr)++;
+	if (!pdn->eeh_config_addr && !pdn->eeh_pe_config_addr) {
+		no_cfg_addr++;
 		return 0;
 	}
 
@@ -611,7 +353,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	if (ret != 0) {
 		printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
 		       ret, dn->full_name);
-		__get_cpu_var(false_positives)++;
+		false_positives++;
 		rc = 0;
 		goto dn_unlock;
 	}
@@ -620,14 +362,14 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	if (rets[1] != 1) {
 		printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
 		       ret, dn->full_name);
-		__get_cpu_var(false_positives)++;
+		false_positives++;
 		rc = 0;
 		goto dn_unlock;
 	}
 
 	/* If not the kind of error we know about, punt. */
 	if (rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
-		__get_cpu_var(false_positives)++;
+		false_positives++;
 		rc = 0;
 		goto dn_unlock;
 	}
@@ -635,12 +377,12 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	/* Note that config-io to empty slots may fail;
 	 * we recognize empty because they don't have children. */
 	if ((rets[0] == 5) && (dn->child == NULL)) {
-		__get_cpu_var(false_positives)++;
+		false_positives++;
 		rc = 0;
 		goto dn_unlock;
 	}
 
-	__get_cpu_var(slot_resets)++;
+	slot_resets++;
 
 	/* Avoid repeated reports of this failure, including problems
 	 * with other functions on this device, and functions under
@@ -648,8 +390,13 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	eeh_mark_slot (dn, EEH_MODE_ISOLATED);
 	spin_unlock_irqrestore(&confirm_error_lock, flags);
 
-	eeh_send_failure_event (dn, dev, rets[0], rets[2]);
-
+	state = pci_channel_io_normal;
+	if ((rets[0] == 2) || (rets[0] == 4))
+		state = pci_channel_io_frozen;
+	if (rets[0] == 5)
+		state = pci_channel_io_perm_failure;
+	eeh_send_failure_event (dn, dev, state, rets[2]);
+
 	/* Most EEH events are due to device driver bugs. Having
 	 * a stack trace will help the device-driver authors figure
 	 * out what happened. So print that out. */
@@ -685,7 +432,7 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
 	addr = eeh_token_to_phys((unsigned long __force) token);
 	dev = pci_get_device_by_addr(addr);
 	if (!dev) {
-		__get_cpu_var(no_device)++;
+		no_device++;
 		return val;
 	}
 
@@ -716,11 +463,16 @@ eeh_slot_availability(struct pci_dn *pdn)
 	if (rc) return rc;
 
 	if (rets[1] == 0) return -1;	/* EEH is not supported */
 	if (rets[0] == 0) return 0;	/* Oll Korrect */
 	if (rets[0] == 5) {
 		if (rets[2] == 0) return -1; /* permanently unavailable */
 		return rets[2]; /* number of millisecs to wait */
 	}
+	if (rets[0] == 1)
+		return 250;
+
+	printk (KERN_ERR "EEH: Slot unavailable: rc=%d, rets=%d %d %d\n",
+		rc, rets[0], rets[1], rets[2]);
 	return -1;
 }
 
@@ -737,6 +489,7 @@ eeh_slot_availability(struct pci_dn *pdn)
 static void
 rtas_pci_slot_reset(struct pci_dn *pdn, int state)
 {
+	int config_addr;
 	int rc;
 
 	BUG_ON (pdn==NULL);
@@ -747,8 +500,13 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state)
 		return;
 	}
 
+	/* Use PE configuration address, if present */
+	config_addr = pdn->eeh_config_addr;
+	if (pdn->eeh_pe_config_addr)
+		config_addr = pdn->eeh_pe_config_addr;
+
 	rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
-		       pdn->eeh_config_addr,
+		       config_addr,
 		       BUID_HI(pdn->phb->buid),
 		       BUID_LO(pdn->phb->buid),
 		       state);
@@ -761,9 +519,11 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state)
 
 /** rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
  * dn -- device node to be reset.
+ *
+ *  Return 0 if success, else a non-zero value.
  */
 
-void
+int
 rtas_set_slot_reset(struct pci_dn *pdn)
 {
 	int i, rc;
@@ -793,10 +553,21 @@ rtas_set_slot_reset(struct pci_dn *pdn)
 	 * ready to be used; if not, wait for recovery. */
 	for (i=0; i<10; i++) {
 		rc = eeh_slot_availability (pdn);
-		if (rc <= 0) break;
+		if (rc < 0)
+			printk (KERN_ERR "EEH: failed (%d) to reset slot %s\n", rc, pdn->node->full_name);
+		if (rc == 0)
+			return 0;
+		if (rc < 0)
+			return -1;
 
 		msleep (rc+100);
 	}
+
+	rc = eeh_slot_availability (pdn);
+	if (rc)
+		printk (KERN_ERR "EEH: timeout resetting slot %s\n", pdn->node->full_name);
+
+	return rc;
 }
 
 /* ------------------------------------------------------- */
@@ -851,7 +622,7 @@ void eeh_restore_bars(struct pci_dn *pdn)
 	if (!pdn)
 		return;
 
-	if (! pdn->eeh_is_bridge)
+	if ((pdn->eeh_mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(pdn->class_code))
 		__restore_bars (pdn);
 
 	dn = pdn->node->child;
@@ -869,30 +640,30 @@ void eeh_restore_bars(struct pci_dn *pdn)
  * PCI devices are added individuallly; but, for the restore,
 * an entire slot is reset at a time.
 */
-static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn)
+static void eeh_save_bars(struct pci_dn *pdn)
 {
 	int i;
 
-	if (!pdev || !pdn )
+	if (!pdn )
 		return;
 
 	for (i = 0; i < 16; i++)
-		pci_read_config_dword(pdev, i * 4, &pdn->config_space[i]);
-
-	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
-		pdn->eeh_is_bridge = 1;
+		rtas_read_config(pdn, i * 4, 4, &pdn->config_space[i]);
 }
 
 void
 rtas_configure_bridge(struct pci_dn *pdn)
 {
-	int token = rtas_token ("ibm,configure-bridge");
+	int config_addr;
 	int rc;
 
-	if (token == RTAS_UNKNOWN_SERVICE)
-		return;
-	rc = rtas_call(token,3,1, NULL,
-		       pdn->eeh_config_addr,
+	/* Use PE configuration address, if present */
+	config_addr = pdn->eeh_config_addr;
+	if (pdn->eeh_pe_config_addr)
+		config_addr = pdn->eeh_pe_config_addr;
+
+	rc = rtas_call(ibm_configure_bridge,3,1, NULL,
+		       config_addr,
 		       BUID_HI(pdn->phb->buid),
 		       BUID_LO(pdn->phb->buid));
 	if (rc) {
@@ -927,6 +698,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
 	int enable;
 	struct pci_dn *pdn = PCI_DN(dn);
 
+	pdn->class_code = 0;
 	pdn->eeh_mode = 0;
 	pdn->eeh_check_count = 0;
 	pdn->eeh_freeze_count = 0;
@@ -943,6 +715,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
 		pdn->eeh_mode |= EEH_MODE_NOCHECK;
 		return NULL;
 	}
+	pdn->class_code = *class_code;
 
 	/*
 	 * Now decide if we are going to "Disable" EEH checking
@@ -953,8 +726,10 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
 	 * But there are a few cases like display devices that make sense.
 	 */
 	enable = 1;	/* i.e. we will do checking */
+#if 0
 	if ((*class_code >> 16) == PCI_BASE_CLASS_DISPLAY)
 		enable = 0;
+#endif
 
 	if (!enable)
 		pdn->eeh_mode |= EEH_MODE_NOCHECK;
@@ -973,8 +748,22 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
 		eeh_subsystem_enabled = 1;
 		pdn->eeh_mode |= EEH_MODE_SUPPORTED;
 		pdn->eeh_config_addr = regs[0];
+
+		/* If the newer, better, ibm,get-config-addr-info is supported,
+		 * then use that instead. */
+		pdn->eeh_pe_config_addr = 0;
+		if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
+			unsigned int rets[2];
+			ret = rtas_call (ibm_get_config_addr_info, 4, 2, rets,
+				pdn->eeh_config_addr,
+				info->buid_hi, info->buid_lo,
+				0);
+			if (ret == 0)
+				pdn->eeh_pe_config_addr = rets[0];
+		}
 #ifdef DEBUG
-		printk(KERN_DEBUG "EEH: %s: eeh enabled\n", dn->full_name);
+		printk(KERN_DEBUG "EEH: %s: eeh enabled, config=%x pe_config=%x\n",
+		       dn->full_name, pdn->eeh_config_addr, pdn->eeh_pe_config_addr);
 #endif
 	} else {
 
@@ -993,6 +782,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
 			dn->full_name);
 	}
 
+	eeh_save_bars(pdn);
 	return NULL;
 }
 
@@ -1026,6 +816,8 @@ void __init eeh_init(void)
 	ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
 	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
 	ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
+	ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
+	ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
 
 	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
 		return;
@@ -1080,12 +872,10 @@ void eeh_add_device_early(struct device_node *dn)
 	if (!dn || !PCI_DN(dn))
 		return;
 	phb = PCI_DN(dn)->phb;
-	if (NULL == phb || 0 == phb->buid) {
-		printk(KERN_WARNING "EEH: Expected buid but found none for %s\n",
-		       dn->full_name);
-		dump_stack();
+
+	/* USB Bus children of PCI devices will not have BUID's */
+	if (NULL == phb || 0 == phb->buid)
 		return;
-	}
 
 	info.buid_hi = BUID_HI(phb->buid);
 	info.buid_lo = BUID_LO(phb->buid);
@@ -1127,7 +917,6 @@ void eeh_add_device_late(struct pci_dev *dev)
 	pdn->pcidev = dev;
 
 	pci_addr_cache_insert_device (dev);
-	eeh_save_bars(dev, pdn);
 }
 EXPORT_SYMBOL_GPL(eeh_add_device_late);
 
@@ -1175,25 +964,9 @@ EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
 
 static int proc_eeh_show(struct seq_file *m, void *v)
 {
-	unsigned int cpu;
-	unsigned long ffs = 0, positives = 0, failures = 0;
-	unsigned long resets = 0;
-	unsigned long no_dev = 0, no_dn = 0, no_cfg = 0, no_check = 0;
-
-	for_each_cpu(cpu) {
-		ffs += per_cpu(total_mmio_ffs, cpu);
-		positives += per_cpu(false_positives, cpu);
-		failures += per_cpu(ignored_failures, cpu);
-		resets += per_cpu(slot_resets, cpu);
-		no_dev += per_cpu(no_device, cpu);
-		no_dn += per_cpu(no_dn, cpu);
-		no_cfg += per_cpu(no_cfg_addr, cpu);
-		no_check += per_cpu(ignored_check, cpu);
-	}
-
 	if (0 == eeh_subsystem_enabled) {
 		seq_printf(m, "EEH Subsystem is globally disabled\n");
-		seq_printf(m, "eeh_total_mmio_ffs=%ld\n", ffs);
+		seq_printf(m, "eeh_total_mmio_ffs=%ld\n", total_mmio_ffs);
 	} else {
 		seq_printf(m, "EEH Subsystem is enabled\n");
 		seq_printf(m,
@@ -1205,8 +978,10 @@ static int proc_eeh_show(struct seq_file *m, void *v)
 			"eeh_false_positives=%ld\n"
 			"eeh_ignored_failures=%ld\n"
 			"eeh_slot_resets=%ld\n",
-			no_dev, no_dn, no_cfg, no_check,
-			ffs, positives, failures, resets);
+			no_device, no_dn, no_cfg_addr,
+			ignored_check, total_mmio_ffs,
+			false_positives, ignored_failures,
+			slot_resets);
 	}
 
 	return 0;
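A pattern repeated throughout the eeh.c hunks above is choosing between the older per-device config address and the new "partitionable endpoint" (PE) address obtained from ibm,get-config-addr-info before every RTAS call. The stand-alone model below shows just that selection; the struct is a stand-in for the kernel's struct pci_dn and the addresses are invented for illustration.

/* Model of the "Use PE configuration address, if present" fallback added in
 * eeh_slot_error_detail(), read_slot_reset_state(), rtas_pci_slot_reset()
 * and rtas_configure_bridge().  Not kernel code; fields are stand-ins. */
#include <stdio.h>

struct pci_dn_model {
        unsigned int eeh_config_addr;    /* per-device config address */
        unsigned int eeh_pe_config_addr; /* PE address, 0 if firmware lacks it */
};

static unsigned int pick_config_addr(const struct pci_dn_model *pdn)
{
        /* Use PE configuration address, if present */
        unsigned int config_addr = pdn->eeh_config_addr;
        if (pdn->eeh_pe_config_addr)
                config_addr = pdn->eeh_pe_config_addr;
        return config_addr;
}

int main(void)
{
        struct pci_dn_model old_fw = { 0x1000, 0 };      /* no PE address known */
        struct pci_dn_model new_fw = { 0x1000, 0x2000 }; /* PE address available */

        printf("old firmware -> %#x\n", pick_config_addr(&old_fw));
        printf("new firmware -> %#x\n", pick_config_addr(&new_fw));
        return 0;
}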
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
new file mode 100644
index 000000000000..d4a402c5866c
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -0,0 +1,316 @@
1/*
2 * eeh_cache.c
3 * PCI address cache; allows the lookup of PCI devices based on I/O address
4 *
5 * Copyright (C) 2004 Linas Vepstas <linas@austin.ibm.com> IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/list.h>
23#include <linux/pci.h>
24#include <linux/rbtree.h>
25#include <linux/spinlock.h>
26#include <asm/atomic.h>
27#include <asm/pci-bridge.h>
28#include <asm/ppc-pci.h>
29
30#undef DEBUG
31
32/**
33 * The pci address cache subsystem. This subsystem places
34 * PCI device address resources into a red-black tree, sorted
35 * according to the address range, so that given only an i/o
36 * address, the corresponding PCI device can be **quickly**
37 * found. It is safe to perform an address lookup in an interrupt
38 * context; this ability is an important feature.
39 *
40 * Currently, the only customer of this code is the EEH subsystem;
41 * thus, this code has been somewhat tailored to suit EEH better.
42 * In particular, the cache does *not* hold the addresses of devices
43 * for which EEH is not enabled.
44 *
45 * (Implementation Note: The RB tree seems to be better/faster
46 * than any hash algo I could think of for this problem, even
47 * with the penalty of slow pointer chases for d-cache misses).
48 */
49struct pci_io_addr_range
50{
51 struct rb_node rb_node;
52 unsigned long addr_lo;
53 unsigned long addr_hi;
54 struct pci_dev *pcidev;
55 unsigned int flags;
56};
57
58static struct pci_io_addr_cache
59{
60 struct rb_root rb_root;
61 spinlock_t piar_lock;
62} pci_io_addr_cache_root;
63
64static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
65{
66 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
67
68 while (n) {
69 struct pci_io_addr_range *piar;
70 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
71
72 if (addr < piar->addr_lo) {
73 n = n->rb_left;
74 } else {
75 if (addr > piar->addr_hi) {
76 n = n->rb_right;
77 } else {
78 pci_dev_get(piar->pcidev);
79 return piar->pcidev;
80 }
81 }
82 }
83
84 return NULL;
85}
86
87/**
88 * pci_get_device_by_addr - Get device, given only address
89 * @addr: mmio (PIO) phys address or i/o port number
90 *
91 * Given an mmio phys address, or a port number, find a pci device
92 * that implements this address. Be sure to pci_dev_put the device
93 * when finished. I/O port numbers are assumed to be offset
94 * from zero (that is, they do *not* have pci_io_addr added in).
95 * It is safe to call this function within an interrupt.
96 */
97struct pci_dev *pci_get_device_by_addr(unsigned long addr)
98{
99 struct pci_dev *dev;
100 unsigned long flags;
101
102 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
103 dev = __pci_get_device_by_addr(addr);
104 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
105 return dev;
106}
107
108#ifdef DEBUG
109/*
110 * Handy-dandy debug print routine, does nothing more
111 * than print out the contents of our addr cache.
112 */
113static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
114{
115 struct rb_node *n;
116 int cnt = 0;
117
118 n = rb_first(&cache->rb_root);
119 while (n) {
120 struct pci_io_addr_range *piar;
121 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
122 printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
123 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
124 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
125 cnt++;
126 n = rb_next(n);
127 }
128}
129#endif
130
131/* Insert address range into the rb tree. */
132static struct pci_io_addr_range *
133pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
134 unsigned long ahi, unsigned int flags)
135{
136 struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
137 struct rb_node *parent = NULL;
138 struct pci_io_addr_range *piar;
139
140 /* Walk tree, find a place to insert into tree */
141 while (*p) {
142 parent = *p;
143 piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
144 if (ahi < piar->addr_lo) {
145 p = &parent->rb_left;
146 } else if (alo > piar->addr_hi) {
147 p = &parent->rb_right;
148 } else {
149 if (dev != piar->pcidev ||
150 alo != piar->addr_lo || ahi != piar->addr_hi) {
151 printk(KERN_WARNING "PIAR: overlapping address range\n");
152 }
153 return piar;
154 }
155 }
156 piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
157 if (!piar)
158 return NULL;
159
160 piar->addr_lo = alo;
161 piar->addr_hi = ahi;
162 piar->pcidev = dev;
163 piar->flags = flags;
164
165#ifdef DEBUG
166 printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
167 alo, ahi, pci_name (dev));
168#endif
169
170 rb_link_node(&piar->rb_node, parent, p);
171 rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
172
173 return piar;
174}
175
176static void __pci_addr_cache_insert_device(struct pci_dev *dev)
177{
178 struct device_node *dn;
179 struct pci_dn *pdn;
180 int i;
181 int inserted = 0;
182
183 dn = pci_device_to_OF_node(dev);
184 if (!dn) {
185 printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
186 return;
187 }
188
189 /* Skip any devices for which EEH is not enabled. */
190 pdn = PCI_DN(dn);
191 if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
192 pdn->eeh_mode & EEH_MODE_NOCHECK) {
193#ifdef DEBUG
194 printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
195 pci_name(dev), pdn->node->full_name);
196#endif
197 return;
198 }
199
200 /* The cache holds a reference to the device... */
201 pci_dev_get(dev);
202
203 /* Walk resources on this device, poke them into the tree */
204 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
205 unsigned long start = pci_resource_start(dev,i);
206 unsigned long end = pci_resource_end(dev,i);
207 unsigned int flags = pci_resource_flags(dev,i);
208
209 /* We are interested only bus addresses, not dma or other stuff */
210 if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
211 continue;
212 if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
213 continue;
214 pci_addr_cache_insert(dev, start, end, flags);
215 inserted = 1;
216 }
217
218 /* If there was nothing to add, the cache has no reference... */
219 if (!inserted)
220 pci_dev_put(dev);
221}
222
223/**
224 * pci_addr_cache_insert_device - Add a device to the address cache
225 * @dev: PCI device whose I/O addresses we are interested in.
226 *
227 * In order to support the fast lookup of devices based on addresses,
228 * we maintain a cache of devices that can be quickly searched.
229 * This routine adds a device to that cache.
230 */
231void pci_addr_cache_insert_device(struct pci_dev *dev)
232{
233 unsigned long flags;
234
235 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
236 __pci_addr_cache_insert_device(dev);
237 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
238}
239
240static inline void __pci_addr_cache_remove_device(struct pci_dev *dev)
241{
242 struct rb_node *n;
243 int removed = 0;
244
245restart:
246 n = rb_first(&pci_io_addr_cache_root.rb_root);
247 while (n) {
248 struct pci_io_addr_range *piar;
249 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
250
251 if (piar->pcidev == dev) {
252 rb_erase(n, &pci_io_addr_cache_root.rb_root);
253 removed = 1;
254 kfree(piar);
255 goto restart;
256 }
257 n = rb_next(n);
258 }
259
260 /* The cache no longer holds its reference to this device... */
261 if (removed)
262 pci_dev_put(dev);
263}
264
265/**
266 * pci_addr_cache_remove_device - remove pci device from addr cache
267 * @dev: device to remove
268 *
269 * Remove a device from the addr-cache tree.
270 * This is potentially expensive, since it will walk
271 * the tree multiple times (once per resource).
272 * But so what; device removal doesn't need to be that fast.
273 */
274void pci_addr_cache_remove_device(struct pci_dev *dev)
275{
276 unsigned long flags;
277
278 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
279 __pci_addr_cache_remove_device(dev);
280 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
281}
282
283/**
284 * pci_addr_cache_build - Build a cache of I/O addresses
285 *
286 * Build a cache of pci i/o addresses. This cache will be used to
287 * find the pci device that corresponds to a given address.
288 * This routine scans all pci busses to build the cache.
289 * Must be run late in boot process, after the pci controllers
290 * have been scaned for devices (after all device resources are known).
291 */
292void __init pci_addr_cache_build(void)
293{
294 struct device_node *dn;
295 struct pci_dev *dev = NULL;
296
297 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
298
299 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
300 /* Ignore PCI bridges */
301 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
302 continue;
303
304 pci_addr_cache_insert_device(dev);
305
306 dn = pci_device_to_OF_node(dev);
307 pci_dev_get (dev); /* matching put is in eeh_remove_device() */
308 PCI_DN(dn)->pcidev = dev;
309 }
310
311#ifdef DEBUG
312 /* Verify tree built up above, echo back the list of addrs. */
313 pci_addr_cache_print(&pci_io_addr_cache_root);
314#endif
315}
316
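The new file keeps the earlier cache behaviour: devices with EEH enabled have their MMIO and PIO ranges inserted, a faulting address can be mapped back to its pci_dev from interrupt context, and callers must drop the reference when done. The stand-alone sketch below mirrors only that lookup contract, using a flat array instead of the kernel's rb-tree; the addresses and device names are invented.

/* Userspace model of the lookup pci_get_device_by_addr() provides: map an
 * MMIO/PIO address to the device owning that range.  The kernel version
 * walks an rb-tree under a spinlock and takes a pci_dev reference. */
#include <stdio.h>
#include <stddef.h>

struct range_model {
        unsigned long addr_lo, addr_hi;
        const char *dev_name;           /* stands in for struct pci_dev * */
};

static const struct range_model cache[] = {
        { 0xd0000000UL, 0xd0000fffUL, "0001:00:01.0" },
        { 0xd0001000UL, 0xd0003fffUL, "0001:00:02.0" },
};

static const char *get_device_by_addr(unsigned long addr)
{
        size_t i;
        for (i = 0; i < sizeof(cache) / sizeof(cache[0]); i++)
                if (addr >= cache[i].addr_lo && addr <= cache[i].addr_hi)
                        return cache[i].dev_name;  /* kernel: pci_dev_get() here */
        return NULL;
}

int main(void)
{
        unsigned long bad_addr = 0xd0001234UL;
        const char *dev = get_device_by_addr(bad_addr);

        printf("addr %#lx -> %s\n", bad_addr, dev ? dev : "no device");
        /* kernel callers must pci_dev_put() the returned device when done */
        return 0;
}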
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
new file mode 100644
index 000000000000..6373372932ba
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -0,0 +1,376 @@
1/*
2 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
3 * Copyright (C) 2004, 2005 Linas Vepstas <linas@linas.org>
4 *
5 * All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or (at
10 * your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
15 * NON INFRINGEMENT. See the GNU General Public License for more
16 * details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 * Send feedback to <linas@us.ibm.com>
23 *
24 */
25#include <linux/delay.h>
26#include <linux/irq.h>
27#include <linux/interrupt.h>
28#include <linux/notifier.h>
29#include <linux/pci.h>
30#include <asm/eeh.h>
31#include <asm/eeh_event.h>
32#include <asm/ppc-pci.h>
33#include <asm/pci-bridge.h>
34#include <asm/prom.h>
35#include <asm/rtas.h>
36
37
38static inline const char * pcid_name (struct pci_dev *pdev)
39{
40 if (pdev->dev.driver)
41 return pdev->dev.driver->name;
42 return "";
43}
44
45#ifdef DEBUG
46static void print_device_node_tree (struct pci_dn *pdn, int dent)
47{
48 int i;
49 if (!pdn) return;
50 for (i=0;i<dent; i++)
51 printk(" ");
52 printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
53 pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
54 pdn->eeh_pe_config_addr, pdn->node->full_name);
55 dent += 3;
56 struct device_node *pc = pdn->node->child;
57 while (pc) {
58 print_device_node_tree(PCI_DN(pc), dent);
59 pc = pc->sibling;
60 }
61}
62#endif
63
64/**
65 * irq_in_use - return true if this irq is being used
66 */
67static int irq_in_use(unsigned int irq)
68{
69 int rc = 0;
70 unsigned long flags;
71 struct irq_desc *desc = irq_desc + irq;
72
73 spin_lock_irqsave(&desc->lock, flags);
74 if (desc->action)
75 rc = 1;
76 spin_unlock_irqrestore(&desc->lock, flags);
77 return rc;
78}
79
80/* ------------------------------------------------------- */
81/** eeh_report_error - report an EEH error to each device,
82 * collect up and merge the device responses.
83 */
84
85static void eeh_report_error(struct pci_dev *dev, void *userdata)
86{
87 enum pci_ers_result rc, *res = userdata;
88 struct pci_driver *driver = dev->driver;
89
90 dev->error_state = pci_channel_io_frozen;
91
92 if (!driver)
93 return;
94
95 if (irq_in_use (dev->irq)) {
96 struct device_node *dn = pci_device_to_OF_node(dev);
97 PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED;
98 disable_irq_nosync(dev->irq);
99 }
100 if (!driver->err_handler)
101 return;
102 if (!driver->err_handler->error_detected)
103 return;
104
105 rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
106 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
107 if (*res == PCI_ERS_RESULT_NEED_RESET) return;
108 if (*res == PCI_ERS_RESULT_DISCONNECT &&
109 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
110}
111
112/** eeh_report_reset -- tell this device that the pci slot
113 * has been reset.
114 */
115
116static void eeh_report_reset(struct pci_dev *dev, void *userdata)
117{
118 struct pci_driver *driver = dev->driver;
119 struct device_node *dn = pci_device_to_OF_node(dev);
120
121 if (!driver)
122 return;
123
124 if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) {
125 PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED;
126 enable_irq(dev->irq);
127 }
128 if (!driver->err_handler)
129 return;
130 if (!driver->err_handler->slot_reset)
131 return;
132
133 driver->err_handler->slot_reset(dev);
134}
135
136static void eeh_report_resume(struct pci_dev *dev, void *userdata)
137{
138 struct pci_driver *driver = dev->driver;
139
140 dev->error_state = pci_channel_io_normal;
141
142 if (!driver)
143 return;
144 if (!driver->err_handler)
145 return;
146 if (!driver->err_handler->resume)
147 return;
148
149 driver->err_handler->resume(dev);
150}
151
152static void eeh_report_failure(struct pci_dev *dev, void *userdata)
153{
154 struct pci_driver *driver = dev->driver;
155
156 dev->error_state = pci_channel_io_perm_failure;
157
158 if (!driver)
159 return;
160
161 if (irq_in_use (dev->irq)) {
162 struct device_node *dn = pci_device_to_OF_node(dev);
163 PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED;
164 disable_irq_nosync(dev->irq);
165 }
166 if (!driver->err_handler)
167 return;
168 if (!driver->err_handler->error_detected)
169 return;
170 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
171}
172
173/* ------------------------------------------------------- */
174/**
175 * handle_eeh_events -- reset a PCI device after hard lockup.
176 *
177 * pSeries systems will isolate a PCI slot if the PCI-Host
178 * bridge detects address or data parity errors, or DMAs
179 * occurring to wild addresses (which usually happen due to
180 * bugs in device drivers or in PCI adapter firmware).
181 * Slot isolations also occur if #SERR, #PERR or other misc
182 * PCI-related errors are detected.
183 *
184 * The recovery process consists of unplugging the device driver
185 * (which generates hotplug events to userspace), then issuing
186 * a PCI #RST to the device, then reconfiguring the PCI config
187 * space for all bridges & devices under this slot, and then
188 * finally restarting the device drivers (which causes a second
189 * set of hotplug events to go out to userspace).
190 */
191
192/**
193 * eeh_reset_device() -- perform actual reset of a pci slot
194 * Args: bus: pointer to the pci bus structure corresponding
195 * to the isolated slot. A non-null value will
196 * cause all devices under the bus to be removed
197 * and then re-added.
198 * pe_dn: pointer to a "Partitionable Endpoint" device node.
199 * This is the top-level structure on which pci
200 * bus resets can be performed.
201 */
202
203static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
204{
205 int rc;
206 if (bus)
207 pcibios_remove_pci_devices(bus);
208
209 /* Reset the pci controller. (Asserts RST#; resets config space).
210 * Reconfigure bridges and devices. Don't try to bring the system
211 * up if the reset failed for some reason. */
212 rc = rtas_set_slot_reset(pe_dn);
213 if (rc)
214 return rc;
215
216 /* New-style config addrs might be shared across multiple devices;
217 * walk over all functions on this device */
218 if (pe_dn->eeh_pe_config_addr) {
219 struct device_node *pe = pe_dn->node;
220 pe = pe->parent->child;
221 while (pe) {
222 struct pci_dn *ppe = PCI_DN(pe);
223 if (pe_dn->eeh_pe_config_addr == ppe->eeh_pe_config_addr) {
224 rtas_configure_bridge(ppe);
225 eeh_restore_bars(ppe);
226 }
227 pe = pe->sibling;
228 }
229 } else {
230 rtas_configure_bridge(pe_dn);
231 eeh_restore_bars(pe_dn);
232 }
233
234 /* Give the system 5 seconds to finish running the user-space
235 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
236 * this is a hack, but if we don't do this, and try to bring
237 * the device up before the scripts have taken it down,
238 * potentially weird things happen.
239 */
240 if (bus) {
241 ssleep (5);
242 pcibios_add_pci_devices(bus);
243 }
244
245 return 0;
246}
247
248/* The longest amount of time to wait for a pci device
249 * to come back on line, in seconds.
250 */
251#define MAX_WAIT_FOR_RECOVERY 15
252
253void handle_eeh_events (struct eeh_event *event)
254{
255 struct device_node *frozen_dn;
256 struct pci_dn *frozen_pdn;
257 struct pci_bus *frozen_bus;
258 int rc = 0;
259 enum pci_ers_result result = PCI_ERS_RESULT_NONE;
260
261 frozen_dn = find_device_pe(event->dn);
262
263 if (!frozen_dn) {
264 printk(KERN_ERR "EEH: Error: Cannot find partition endpoint for %s\n",
265 pci_name(event->dev));
266 return;
267 }
268 frozen_bus = pcibios_find_pci_bus(frozen_dn);
269
270 /* There are two different styles for coming up with the PE.
271 * In the old style, it was the highest EEH-capable device
272 * which was always an EADS pci bridge. In the new style,
273 * there might not be any EADS bridges, and even when there are,
274 * the firmware marks them as "EEH incapable". So another
275 * two-step search is needed to find the pci bus. */
276 if (!frozen_bus)
277 frozen_bus = pcibios_find_pci_bus (frozen_dn->parent);
278
279 if (!frozen_bus) {
280 printk(KERN_ERR "EEH: Cannot find PCI bus for %s\n",
281 frozen_dn->full_name);
282 return;
283 }
284
285#if 0
286 /* We may get "permanent failure" messages on empty slots.
287 * These are false alarms. Empty slots have no child dn. */
288 if ((event->state == pci_channel_io_perm_failure) && (frozen_device == NULL))
289 return;
290#endif
291
292 frozen_pdn = PCI_DN(frozen_dn);
293 frozen_pdn->eeh_freeze_count++;
294
295 if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
296 goto hard_fail;
297
298 /* If the reset state is a '5' and the time to reset is 0 (infinity)
299 * or is more than 15 seconds, then mark this as a permanent failure.
300 */
301 if ((event->state == pci_channel_io_perm_failure) &&
302 ((event->time_unavail <= 0) ||
303 (event->time_unavail > MAX_WAIT_FOR_RECOVERY*1000)))
304 goto hard_fail;
305
306 eeh_slot_error_detail(frozen_pdn, 1 /* Temporary Error */);
307 printk(KERN_WARNING
308 "EEH: This PCI device has failed %d times since last reboot: %s - %s\n",
309 frozen_pdn->eeh_freeze_count,
310 pci_name (frozen_pdn->pcidev),
311 pcid_name(frozen_pdn->pcidev));
312
313 /* Walk the various device drivers attached to this slot through
314 * a reset sequence, giving each an opportunity to do what it needs
315 * to accomplish the reset. Each child gets a report of the
316 * status ... if any child can't handle the reset, then the entire
317 * slot is dlpar removed and added.
318 */
319 pci_walk_bus(frozen_bus, eeh_report_error, &result);
320
321 /* If all device drivers were EEH-unaware, then shut
322 * down all of the device drivers, and hope they
323 * go down willingly, without panicking the system.
324 */
325 if (result == PCI_ERS_RESULT_NONE) {
326 rc = eeh_reset_device(frozen_pdn, frozen_bus);
327 if (rc)
328 goto hard_fail;
329 }
330
331 /* If any device called out for a reset, then reset the slot */
332 if (result == PCI_ERS_RESULT_NEED_RESET) {
333 rc = eeh_reset_device(frozen_pdn, NULL);
334 if (rc)
335 goto hard_fail;
336 pci_walk_bus(frozen_bus, eeh_report_reset, 0);
337 }
338
339 /* If all devices reported they can proceed, then re-enable PIO */
340 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
341 /* XXX Not supported; we brute-force reset the device */
342 rc = eeh_reset_device(frozen_pdn, NULL);
343 if (rc)
344 goto hard_fail;
345 pci_walk_bus(frozen_bus, eeh_report_reset, 0);
346 }
347
348 /* Tell all device drivers that they can resume operations */
349 pci_walk_bus(frozen_bus, eeh_report_resume, 0);
350
351 return;
352
353hard_fail:
354 /*
355 * About 90% of all real-life EEH failures in the field
356 * are due to poorly seated PCI cards. Only 10% or so are
357 * due to actual, failed cards.
358 */
359 printk(KERN_ERR
360 "EEH: PCI device %s - %s has failed %d times \n"
361 "and has been permanently disabled. Please try reseating\n"
362 "this device or replacing it.\n",
363 pci_name (frozen_pdn->pcidev),
364 pcid_name(frozen_pdn->pcidev),
365 frozen_pdn->eeh_freeze_count);
366
367 eeh_slot_error_detail(frozen_pdn, 2 /* Permanent Error */);
368
369 /* Notify all devices that they're about to go down. */
370 pci_walk_bus(frozen_bus, eeh_report_failure, 0);
371
372 /* Shut down the device drivers for good. */
373 pcibios_remove_pci_devices(frozen_bus);
374}
375
376/* ---------- end of file ---------- */
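The eeh_report_error/eeh_report_reset/eeh_report_resume walks above only have something to call if a device driver supplies PCI error-recovery callbacks of its own. Below is a minimal driver-side sketch, assuming the generic struct pci_error_handlers / pci_ers_result_t interface this file programs against; the foo_* names are hypothetical.

#include <linux/pci.h>

/* Hypothetical driver callbacks, invoked by the eeh_report_* walks above. */
static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   enum pci_channel_state state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	/* Stop all MMIO/DMA and ask the EEH core to reset the slot. */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	/* Slot has been through #RST; re-program the adapter here. */
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	/* Recovery finished (eeh_report_resume); restart normal I/O. */
}

static struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,
};

/* A driver would opt in by setting .err_handler = &foo_err_handler
 * in its struct pci_driver; drivers without an err_handler fall back
 * to the remove/re-add path taken when result == PCI_ERS_RESULT_NONE. */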
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 92497333c2b6..9a9961f27480 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -21,6 +21,7 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <asm/eeh_event.h> 23#include <asm/eeh_event.h>
24#include <asm/ppc-pci.h>
24 25
25/** Overview: 26/** Overview:
26 * EEH error states may be detected within exception handlers; 27 * EEH error states may be detected within exception handlers;
@@ -37,31 +38,6 @@ static void eeh_thread_launcher(void *);
37DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); 38DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
38 39
39/** 40/**
40 * eeh_panic - call panic() for an eeh event that cannot be handled.
41 * The philosophy of this routine is that it is better to panic and
42 * halt the OS than it is to risk possible data corruption by
43 * oblivious device drivers that don't know better.
44 *
45 * @dev pci device that had an eeh event
46 * @reset_state current reset state of the device slot
47 */
48static void eeh_panic(struct pci_dev *dev, int reset_state)
49{
50 /*
51 * Since the panic_on_oops sysctl is used to halt the system
52 * in light of potential corruption, we can use it here.
53 */
54 if (panic_on_oops) {
55 panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
56 pci_name(dev));
57 }
58 else {
59 printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
60 reset_state, pci_name(dev));
61 }
62}
63
64/**
65 * eeh_event_handler - dispatch EEH events. The detection of a frozen 41 * eeh_event_handler - dispatch EEH events. The detection of a frozen
66 * slot can occur inside an interrupt, where it can be hard to do 42 * slot can occur inside an interrupt, where it can be hard to do
67 * anything about it. The goal of this routine is to pull these 43 * anything about it. The goal of this routine is to pull these
@@ -82,10 +58,16 @@ static int eeh_event_handler(void * dummy)
82 58
83 spin_lock_irqsave(&eeh_eventlist_lock, flags); 59 spin_lock_irqsave(&eeh_eventlist_lock, flags);
84 event = NULL; 60 event = NULL;
61
62 /* Unqueue the event, get ready to process. */
85 if (!list_empty(&eeh_eventlist)) { 63 if (!list_empty(&eeh_eventlist)) {
86 event = list_entry(eeh_eventlist.next, struct eeh_event, list); 64 event = list_entry(eeh_eventlist.next, struct eeh_event, list);
87 list_del(&event->list); 65 list_del(&event->list);
88 } 66 }
67
68 if (event)
69 eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
70
89 spin_unlock_irqrestore(&eeh_eventlist_lock, flags); 71 spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
90 if (event == NULL) 72 if (event == NULL)
91 break; 73 break;
@@ -93,8 +75,11 @@ static int eeh_event_handler(void * dummy)
93 printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", 75 printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
94 pci_name(event->dev)); 76 pci_name(event->dev));
95 77
96 eeh_panic (event->dev, event->state); 78 handle_eeh_events(event);
79
80 eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
97 81
82 pci_dev_put(event->dev);
98 kfree(event); 83 kfree(event);
99 } 84 }
100 85
@@ -122,7 +107,7 @@ static void eeh_thread_launcher(void *dummy)
122 */ 107 */
123int eeh_send_failure_event (struct device_node *dn, 108int eeh_send_failure_event (struct device_node *dn,
124 struct pci_dev *dev, 109 struct pci_dev *dev,
125 int state, 110 enum pci_channel_state state,
126 int time_unavail) 111 int time_unavail)
127{ 112{
128 unsigned long flags; 113 unsigned long flags;