diff options
author | Michael Ellerman <mpe@ellerman.id.au> | 2015-04-06 23:24:55 -0400 |
---|---|---|
committer | Michael Ellerman <mpe@ellerman.id.au> | 2015-04-06 23:24:55 -0400 |
commit | 428d4d6520a0b8683fe9eac6df3077001e13d00b (patch) | |
tree | 8afa1af0babc8f2c375acc244aae969846dfe199 | |
parent | 28ea605caac49497e5e34a73ee4f4682fc035f1d (diff) | |
parent | 027fa02f84e851e21daffdf8900d6117071890f8 (diff) |
Merge branch 'next-eeh' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc into next
28 files changed, 1652 insertions, 1648 deletions
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index 38faeded7d59..9f1371bab5fc 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h | |||
@@ -8,6 +8,9 @@ | |||
8 | 8 | ||
9 | struct dma_map_ops; | 9 | struct dma_map_ops; |
10 | struct device_node; | 10 | struct device_node; |
11 | #ifdef CONFIG_PPC64 | ||
12 | struct pci_dn; | ||
13 | #endif | ||
11 | 14 | ||
12 | /* | 15 | /* |
13 | * Arch extensions to struct device. | 16 | * Arch extensions to struct device. |
@@ -34,6 +37,9 @@ struct dev_archdata { | |||
34 | #ifdef CONFIG_SWIOTLB | 37 | #ifdef CONFIG_SWIOTLB |
35 | dma_addr_t max_direct_dma_addr; | 38 | dma_addr_t max_direct_dma_addr; |
36 | #endif | 39 | #endif |
40 | #ifdef CONFIG_PPC64 | ||
41 | struct pci_dn *pci_data; | ||
42 | #endif | ||
37 | #ifdef CONFIG_EEH | 43 | #ifdef CONFIG_EEH |
38 | struct eeh_dev *edev; | 44 | struct eeh_dev *edev; |
39 | #endif | 45 | #endif |
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 55abfd09e47f..a52db28ecc1e 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | struct pci_dev; | 30 | struct pci_dev; |
31 | struct pci_bus; | 31 | struct pci_bus; |
32 | struct device_node; | 32 | struct pci_dn; |
33 | 33 | ||
34 | #ifdef CONFIG_EEH | 34 | #ifdef CONFIG_EEH |
35 | 35 | ||
@@ -136,14 +136,14 @@ struct eeh_dev { | |||
136 | struct eeh_pe *pe; /* Associated PE */ | 136 | struct eeh_pe *pe; /* Associated PE */ |
137 | struct list_head list; /* Form link list in the PE */ | 137 | struct list_head list; /* Form link list in the PE */ |
138 | struct pci_controller *phb; /* Associated PHB */ | 138 | struct pci_controller *phb; /* Associated PHB */ |
139 | struct device_node *dn; /* Associated device node */ | 139 | struct pci_dn *pdn; /* Associated PCI device node */ |
140 | struct pci_dev *pdev; /* Associated PCI device */ | 140 | struct pci_dev *pdev; /* Associated PCI device */ |
141 | struct pci_bus *bus; /* PCI bus for partial hotplug */ | 141 | struct pci_bus *bus; /* PCI bus for partial hotplug */ |
142 | }; | 142 | }; |
143 | 143 | ||
144 | static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev) | 144 | static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev) |
145 | { | 145 | { |
146 | return edev ? edev->dn : NULL; | 146 | return edev ? edev->pdn : NULL; |
147 | } | 147 | } |
148 | 148 | ||
149 | static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) | 149 | static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) |
@@ -200,8 +200,7 @@ struct eeh_ops { | |||
200 | char *name; | 200 | char *name; |
201 | int (*init)(void); | 201 | int (*init)(void); |
202 | int (*post_init)(void); | 202 | int (*post_init)(void); |
203 | void* (*of_probe)(struct device_node *dn, void *flag); | 203 | void* (*probe)(struct pci_dn *pdn, void *data); |
204 | int (*dev_probe)(struct pci_dev *dev, void *flag); | ||
205 | int (*set_option)(struct eeh_pe *pe, int option); | 204 | int (*set_option)(struct eeh_pe *pe, int option); |
206 | int (*get_pe_addr)(struct eeh_pe *pe); | 205 | int (*get_pe_addr)(struct eeh_pe *pe); |
207 | int (*get_state)(struct eeh_pe *pe, int *state); | 206 | int (*get_state)(struct eeh_pe *pe, int *state); |
@@ -211,10 +210,10 @@ struct eeh_ops { | |||
211 | int (*configure_bridge)(struct eeh_pe *pe); | 210 | int (*configure_bridge)(struct eeh_pe *pe); |
212 | int (*err_inject)(struct eeh_pe *pe, int type, int func, | 211 | int (*err_inject)(struct eeh_pe *pe, int type, int func, |
213 | unsigned long addr, unsigned long mask); | 212 | unsigned long addr, unsigned long mask); |
214 | int (*read_config)(struct device_node *dn, int where, int size, u32 *val); | 213 | int (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val); |
215 | int (*write_config)(struct device_node *dn, int where, int size, u32 val); | 214 | int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val); |
216 | int (*next_error)(struct eeh_pe **pe); | 215 | int (*next_error)(struct eeh_pe **pe); |
217 | int (*restore_config)(struct device_node *dn); | 216 | int (*restore_config)(struct pci_dn *pdn); |
218 | }; | 217 | }; |
219 | 218 | ||
220 | extern int eeh_subsystem_flags; | 219 | extern int eeh_subsystem_flags; |
@@ -272,7 +271,7 @@ void eeh_pe_restore_bars(struct eeh_pe *pe); | |||
272 | const char *eeh_pe_loc_get(struct eeh_pe *pe); | 271 | const char *eeh_pe_loc_get(struct eeh_pe *pe); |
273 | struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); | 272 | struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); |
274 | 273 | ||
275 | void *eeh_dev_init(struct device_node *dn, void *data); | 274 | void *eeh_dev_init(struct pci_dn *pdn, void *data); |
276 | void eeh_dev_phb_init_dynamic(struct pci_controller *phb); | 275 | void eeh_dev_phb_init_dynamic(struct pci_controller *phb); |
277 | int eeh_init(void); | 276 | int eeh_init(void); |
278 | int __init eeh_ops_register(struct eeh_ops *ops); | 277 | int __init eeh_ops_register(struct eeh_ops *ops); |
@@ -280,8 +279,8 @@ int __exit eeh_ops_unregister(const char *name); | |||
280 | int eeh_check_failure(const volatile void __iomem *token); | 279 | int eeh_check_failure(const volatile void __iomem *token); |
281 | int eeh_dev_check_failure(struct eeh_dev *edev); | 280 | int eeh_dev_check_failure(struct eeh_dev *edev); |
282 | void eeh_addr_cache_build(void); | 281 | void eeh_addr_cache_build(void); |
283 | void eeh_add_device_early(struct device_node *); | 282 | void eeh_add_device_early(struct pci_dn *); |
284 | void eeh_add_device_tree_early(struct device_node *); | 283 | void eeh_add_device_tree_early(struct pci_dn *); |
285 | void eeh_add_device_late(struct pci_dev *); | 284 | void eeh_add_device_late(struct pci_dev *); |
286 | void eeh_add_device_tree_late(struct pci_bus *); | 285 | void eeh_add_device_tree_late(struct pci_bus *); |
287 | void eeh_add_sysfs_files(struct pci_bus *); | 286 | void eeh_add_sysfs_files(struct pci_bus *); |
@@ -323,7 +322,7 @@ static inline int eeh_init(void) | |||
323 | return 0; | 322 | return 0; |
324 | } | 323 | } |
325 | 324 | ||
326 | static inline void *eeh_dev_init(struct device_node *dn, void *data) | 325 | static inline void *eeh_dev_init(struct pci_dn *pdn, void *data) |
327 | { | 326 | { |
328 | return NULL; | 327 | return NULL; |
329 | } | 328 | } |
@@ -339,9 +338,9 @@ static inline int eeh_check_failure(const volatile void __iomem *token) | |||
339 | 338 | ||
340 | static inline void eeh_addr_cache_build(void) { } | 339 | static inline void eeh_addr_cache_build(void) { } |
341 | 340 | ||
342 | static inline void eeh_add_device_early(struct device_node *dn) { } | 341 | static inline void eeh_add_device_early(struct pci_dn *pdn) { } |
343 | 342 | ||
344 | static inline void eeh_add_device_tree_early(struct device_node *dn) { } | 343 | static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { } |
345 | 344 | ||
346 | static inline void eeh_add_device_late(struct pci_dev *dev) { } | 345 | static inline void eeh_add_device_late(struct pci_dev *dev) { } |
347 | 346 | ||
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index c8175a3fe560..098d51e924ea 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h | |||
@@ -125,7 +125,7 @@ struct machdep_calls { | |||
125 | unsigned int (*get_irq)(void); | 125 | unsigned int (*get_irq)(void); |
126 | 126 | ||
127 | /* PCI stuff */ | 127 | /* PCI stuff */ |
128 | /* Called after scanning the bus, before allocating resources */ | 128 | /* Called after allocating resources */ |
129 | void (*pcibios_fixup)(void); | 129 | void (*pcibios_fixup)(void); |
130 | int (*pci_probe_mode)(struct pci_bus *); | 130 | int (*pci_probe_mode)(struct pci_bus *); |
131 | void (*pci_irq_fixup)(struct pci_dev *dev); | 131 | void (*pci_irq_fixup)(struct pci_dev *dev); |
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 546d036fe925..2c6dc2a3d14a 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h | |||
@@ -89,6 +89,7 @@ struct pci_controller { | |||
89 | 89 | ||
90 | #ifdef CONFIG_PPC64 | 90 | #ifdef CONFIG_PPC64 |
91 | unsigned long buid; | 91 | unsigned long buid; |
92 | struct pci_dn *pci_data; | ||
92 | #endif /* CONFIG_PPC64 */ | 93 | #endif /* CONFIG_PPC64 */ |
93 | 94 | ||
94 | void *private_data; | 95 | void *private_data; |
@@ -154,9 +155,15 @@ static inline int isa_vaddr_is_ioport(void __iomem *address) | |||
154 | struct iommu_table; | 155 | struct iommu_table; |
155 | 156 | ||
156 | struct pci_dn { | 157 | struct pci_dn { |
158 | int flags; | ||
159 | |||
157 | int busno; /* pci bus number */ | 160 | int busno; /* pci bus number */ |
158 | int devfn; /* pci device and function number */ | 161 | int devfn; /* pci device and function number */ |
162 | int vendor_id; /* Vendor ID */ | ||
163 | int device_id; /* Device ID */ | ||
164 | int class_code; /* Device class code */ | ||
159 | 165 | ||
166 | struct pci_dn *parent; | ||
160 | struct pci_controller *phb; /* for pci devices */ | 167 | struct pci_controller *phb; /* for pci devices */ |
161 | struct iommu_table *iommu_table; /* for phb's or bridges */ | 168 | struct iommu_table *iommu_table; /* for phb's or bridges */ |
162 | struct device_node *node; /* back-pointer to the device_node */ | 169 | struct device_node *node; /* back-pointer to the device_node */ |
@@ -171,14 +178,17 @@ struct pci_dn { | |||
171 | #ifdef CONFIG_PPC_POWERNV | 178 | #ifdef CONFIG_PPC_POWERNV |
172 | int pe_number; | 179 | int pe_number; |
173 | #endif | 180 | #endif |
181 | struct list_head child_list; | ||
182 | struct list_head list; | ||
174 | }; | 183 | }; |
175 | 184 | ||
176 | /* Get the pointer to a device_node's pci_dn */ | 185 | /* Get the pointer to a device_node's pci_dn */ |
177 | #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) | 186 | #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) |
178 | 187 | ||
188 | extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus, | ||
189 | int devfn); | ||
179 | extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev); | 190 | extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev); |
180 | 191 | extern void *update_dn_pci_info(struct device_node *dn, void *data); | |
181 | extern void * update_dn_pci_info(struct device_node *dn, void *data); | ||
182 | 192 | ||
183 | static inline int pci_device_from_OF_node(struct device_node *np, | 193 | static inline int pci_device_from_OF_node(struct device_node *np, |
184 | u8 *bus, u8 *devfn) | 194 | u8 *bus, u8 *devfn) |
@@ -191,20 +201,12 @@ static inline int pci_device_from_OF_node(struct device_node *np, | |||
191 | } | 201 | } |
192 | 202 | ||
193 | #if defined(CONFIG_EEH) | 203 | #if defined(CONFIG_EEH) |
194 | static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn) | 204 | static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn) |
195 | { | 205 | { |
196 | /* | 206 | return pdn ? pdn->edev : NULL; |
197 | * For those OF nodes whose parent isn't PCI bridge, they | ||
198 | * don't have PCI_DN actually. So we have to skip them for | ||
199 | * any EEH operations. | ||
200 | */ | ||
201 | if (!dn || !PCI_DN(dn)) | ||
202 | return NULL; | ||
203 | |||
204 | return PCI_DN(dn)->edev; | ||
205 | } | 207 | } |
206 | #else | 208 | #else |
207 | #define of_node_to_eeh_dev(x) (NULL) | 209 | #define pdn_to_eeh_dev(x) (NULL) |
208 | #endif | 210 | #endif |
209 | 211 | ||
210 | /** Find the bus corresponding to the indicated device node */ | 212 | /** Find the bus corresponding to the indicated device node */ |
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index db1e2b8eff3c..ade75238ceb5 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h | |||
@@ -33,9 +33,14 @@ extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */ | |||
33 | 33 | ||
34 | /* PCI device_node operations */ | 34 | /* PCI device_node operations */ |
35 | struct device_node; | 35 | struct device_node; |
36 | struct pci_dn; | ||
37 | |||
36 | typedef void *(*traverse_func)(struct device_node *me, void *data); | 38 | typedef void *(*traverse_func)(struct device_node *me, void *data); |
37 | void *traverse_pci_devices(struct device_node *start, traverse_func pre, | 39 | void *traverse_pci_devices(struct device_node *start, traverse_func pre, |
38 | void *data); | 40 | void *data); |
41 | void *traverse_pci_dn(struct pci_dn *root, | ||
42 | void *(*fn)(struct pci_dn *, void *), | ||
43 | void *data); | ||
39 | 44 | ||
40 | extern void pci_devs_phb_init(void); | 45 | extern void pci_devs_phb_init(void); |
41 | extern void pci_devs_phb_init_dynamic(struct pci_controller *phb); | 46 | extern void pci_devs_phb_init_dynamic(struct pci_controller *phb); |
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 3b2252e7731b..76253eb146be 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -164,30 +164,34 @@ __setup("eeh=", eeh_setup); | |||
164 | */ | 164 | */ |
165 | static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | 165 | static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) |
166 | { | 166 | { |
167 | struct device_node *dn = eeh_dev_to_of_node(edev); | 167 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); |
168 | u32 cfg; | 168 | u32 cfg; |
169 | int cap, i; | 169 | int cap, i; |
170 | int n = 0, l = 0; | 170 | int n = 0, l = 0; |
171 | char buffer[128]; | 171 | char buffer[128]; |
172 | 172 | ||
173 | n += scnprintf(buf+n, len-n, "%s\n", dn->full_name); | 173 | n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n", |
174 | pr_warn("EEH: of node=%s\n", dn->full_name); | 174 | edev->phb->global_number, pdn->busno, |
175 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); | ||
176 | pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n", | ||
177 | edev->phb->global_number, pdn->busno, | ||
178 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); | ||
175 | 179 | ||
176 | eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg); | 180 | eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg); |
177 | n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); | 181 | n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); |
178 | pr_warn("EEH: PCI device/vendor: %08x\n", cfg); | 182 | pr_warn("EEH: PCI device/vendor: %08x\n", cfg); |
179 | 183 | ||
180 | eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg); | 184 | eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg); |
181 | n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); | 185 | n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); |
182 | pr_warn("EEH: PCI cmd/status register: %08x\n", cfg); | 186 | pr_warn("EEH: PCI cmd/status register: %08x\n", cfg); |
183 | 187 | ||
184 | /* Gather bridge-specific registers */ | 188 | /* Gather bridge-specific registers */ |
185 | if (edev->mode & EEH_DEV_BRIDGE) { | 189 | if (edev->mode & EEH_DEV_BRIDGE) { |
186 | eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg); | 190 | eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg); |
187 | n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); | 191 | n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); |
188 | pr_warn("EEH: Bridge secondary status: %04x\n", cfg); | 192 | pr_warn("EEH: Bridge secondary status: %04x\n", cfg); |
189 | 193 | ||
190 | eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg); | 194 | eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg); |
191 | n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); | 195 | n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); |
192 | pr_warn("EEH: Bridge control: %04x\n", cfg); | 196 | pr_warn("EEH: Bridge control: %04x\n", cfg); |
193 | } | 197 | } |
@@ -195,11 +199,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | |||
195 | /* Dump out the PCI-X command and status regs */ | 199 | /* Dump out the PCI-X command and status regs */ |
196 | cap = edev->pcix_cap; | 200 | cap = edev->pcix_cap; |
197 | if (cap) { | 201 | if (cap) { |
198 | eeh_ops->read_config(dn, cap, 4, &cfg); | 202 | eeh_ops->read_config(pdn, cap, 4, &cfg); |
199 | n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); | 203 | n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); |
200 | pr_warn("EEH: PCI-X cmd: %08x\n", cfg); | 204 | pr_warn("EEH: PCI-X cmd: %08x\n", cfg); |
201 | 205 | ||
202 | eeh_ops->read_config(dn, cap+4, 4, &cfg); | 206 | eeh_ops->read_config(pdn, cap+4, 4, &cfg); |
203 | n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); | 207 | n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); |
204 | pr_warn("EEH: PCI-X status: %08x\n", cfg); | 208 | pr_warn("EEH: PCI-X status: %08x\n", cfg); |
205 | } | 209 | } |
@@ -211,7 +215,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | |||
211 | pr_warn("EEH: PCI-E capabilities and status follow:\n"); | 215 | pr_warn("EEH: PCI-E capabilities and status follow:\n"); |
212 | 216 | ||
213 | for (i=0; i<=8; i++) { | 217 | for (i=0; i<=8; i++) { |
214 | eeh_ops->read_config(dn, cap+4*i, 4, &cfg); | 218 | eeh_ops->read_config(pdn, cap+4*i, 4, &cfg); |
215 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); | 219 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); |
216 | 220 | ||
217 | if ((i % 4) == 0) { | 221 | if ((i % 4) == 0) { |
@@ -238,7 +242,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | |||
238 | pr_warn("EEH: PCI-E AER capability register set follows:\n"); | 242 | pr_warn("EEH: PCI-E AER capability register set follows:\n"); |
239 | 243 | ||
240 | for (i=0; i<=13; i++) { | 244 | for (i=0; i<=13; i++) { |
241 | eeh_ops->read_config(dn, cap+4*i, 4, &cfg); | 245 | eeh_ops->read_config(pdn, cap+4*i, 4, &cfg); |
242 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); | 246 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); |
243 | 247 | ||
244 | if ((i % 4) == 0) { | 248 | if ((i % 4) == 0) { |
@@ -414,11 +418,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
414 | int ret; | 418 | int ret; |
415 | int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); | 419 | int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); |
416 | unsigned long flags; | 420 | unsigned long flags; |
417 | struct device_node *dn; | 421 | struct pci_dn *pdn; |
418 | struct pci_dev *dev; | 422 | struct pci_dev *dev; |
419 | struct eeh_pe *pe, *parent_pe, *phb_pe; | 423 | struct eeh_pe *pe, *parent_pe, *phb_pe; |
420 | int rc = 0; | 424 | int rc = 0; |
421 | const char *location; | 425 | const char *location = NULL; |
422 | 426 | ||
423 | eeh_stats.total_mmio_ffs++; | 427 | eeh_stats.total_mmio_ffs++; |
424 | 428 | ||
@@ -429,15 +433,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
429 | eeh_stats.no_dn++; | 433 | eeh_stats.no_dn++; |
430 | return 0; | 434 | return 0; |
431 | } | 435 | } |
432 | dn = eeh_dev_to_of_node(edev); | ||
433 | dev = eeh_dev_to_pci_dev(edev); | 436 | dev = eeh_dev_to_pci_dev(edev); |
434 | pe = eeh_dev_to_pe(edev); | 437 | pe = eeh_dev_to_pe(edev); |
435 | 438 | ||
436 | /* Access to IO BARs might get this far and still not want checking. */ | 439 | /* Access to IO BARs might get this far and still not want checking. */ |
437 | if (!pe) { | 440 | if (!pe) { |
438 | eeh_stats.ignored_check++; | 441 | eeh_stats.ignored_check++; |
439 | pr_debug("EEH: Ignored check for %s %s\n", | 442 | pr_debug("EEH: Ignored check for %s\n", |
440 | eeh_pci_name(dev), dn->full_name); | 443 | eeh_pci_name(dev)); |
441 | return 0; | 444 | return 0; |
442 | } | 445 | } |
443 | 446 | ||
@@ -473,10 +476,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
473 | if (pe->state & EEH_PE_ISOLATED) { | 476 | if (pe->state & EEH_PE_ISOLATED) { |
474 | pe->check_count++; | 477 | pe->check_count++; |
475 | if (pe->check_count % EEH_MAX_FAILS == 0) { | 478 | if (pe->check_count % EEH_MAX_FAILS == 0) { |
476 | location = of_get_property(dn, "ibm,loc-code", NULL); | 479 | pdn = eeh_dev_to_pdn(edev); |
480 | if (pdn->node) | ||
481 | location = of_get_property(pdn->node, "ibm,loc-code", NULL); | ||
477 | printk(KERN_ERR "EEH: %d reads ignored for recovering device at " | 482 | printk(KERN_ERR "EEH: %d reads ignored for recovering device at " |
478 | "location=%s driver=%s pci addr=%s\n", | 483 | "location=%s driver=%s pci addr=%s\n", |
479 | pe->check_count, location, | 484 | pe->check_count, |
485 | location ? location : "unknown", | ||
480 | eeh_driver_name(dev), eeh_pci_name(dev)); | 486 | eeh_driver_name(dev), eeh_pci_name(dev)); |
481 | printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", | 487 | printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", |
482 | eeh_driver_name(dev)); | 488 | eeh_driver_name(dev)); |
@@ -667,6 +673,55 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) | |||
667 | return rc; | 673 | return rc; |
668 | } | 674 | } |
669 | 675 | ||
676 | static void *eeh_disable_and_save_dev_state(void *data, void *userdata) | ||
677 | { | ||
678 | struct eeh_dev *edev = data; | ||
679 | struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); | ||
680 | struct pci_dev *dev = userdata; | ||
681 | |||
682 | /* | ||
683 | * The caller should have disabled and saved the | ||
684 | * state for the specified device | ||
685 | */ | ||
686 | if (!pdev || pdev == dev) | ||
687 | return NULL; | ||
688 | |||
689 | /* Ensure we have D0 power state */ | ||
690 | pci_set_power_state(pdev, PCI_D0); | ||
691 | |||
692 | /* Save device state */ | ||
693 | pci_save_state(pdev); | ||
694 | |||
695 | /* | ||
696 | * Disable device to avoid any DMA traffic and | ||
697 | * interrupt from the device | ||
698 | */ | ||
699 | pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); | ||
700 | |||
701 | return NULL; | ||
702 | } | ||
703 | |||
704 | static void *eeh_restore_dev_state(void *data, void *userdata) | ||
705 | { | ||
706 | struct eeh_dev *edev = data; | ||
707 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
708 | struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); | ||
709 | struct pci_dev *dev = userdata; | ||
710 | |||
711 | if (!pdev) | ||
712 | return NULL; | ||
713 | |||
714 | /* Apply customization from firmware */ | ||
715 | if (pdn && eeh_ops->restore_config) | ||
716 | eeh_ops->restore_config(pdn); | ||
717 | |||
718 | /* The caller should restore state for the specified device */ | ||
719 | if (pdev != dev) | ||
720 | pci_save_state(pdev); | ||
721 | |||
722 | return NULL; | ||
723 | } | ||
724 | |||
670 | /** | 725 | /** |
671 | * pcibios_set_pcie_slot_reset - Set PCI-E reset state | 726 | * pcibios_set_pcie_slot_reset - Set PCI-E reset state |
672 | * @dev: pci device struct | 727 | * @dev: pci device struct |
@@ -689,13 +744,19 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat | |||
689 | switch (state) { | 744 | switch (state) { |
690 | case pcie_deassert_reset: | 745 | case pcie_deassert_reset: |
691 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); | 746 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); |
747 | eeh_unfreeze_pe(pe, false); | ||
692 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); | 748 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); |
749 | eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev); | ||
693 | break; | 750 | break; |
694 | case pcie_hot_reset: | 751 | case pcie_hot_reset: |
752 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); | ||
753 | eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); | ||
695 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); | 754 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); |
696 | eeh_ops->reset(pe, EEH_RESET_HOT); | 755 | eeh_ops->reset(pe, EEH_RESET_HOT); |
697 | break; | 756 | break; |
698 | case pcie_warm_reset: | 757 | case pcie_warm_reset: |
758 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); | ||
759 | eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); | ||
699 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); | 760 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); |
700 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); | 761 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); |
701 | break; | 762 | break; |
@@ -815,15 +876,15 @@ out: | |||
815 | */ | 876 | */ |
816 | void eeh_save_bars(struct eeh_dev *edev) | 877 | void eeh_save_bars(struct eeh_dev *edev) |
817 | { | 878 | { |
879 | struct pci_dn *pdn; | ||
818 | int i; | 880 | int i; |
819 | struct device_node *dn; | ||
820 | 881 | ||
821 | if (!edev) | 882 | pdn = eeh_dev_to_pdn(edev); |
883 | if (!pdn) | ||
822 | return; | 884 | return; |
823 | dn = eeh_dev_to_of_node(edev); | ||
824 | 885 | ||
825 | for (i = 0; i < 16; i++) | 886 | for (i = 0; i < 16; i++) |
826 | eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]); | 887 | eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]); |
827 | 888 | ||
828 | /* | 889 | /* |
829 | * For PCI bridges including root port, we need enable bus | 890 | * For PCI bridges including root port, we need enable bus |
@@ -914,7 +975,7 @@ static struct notifier_block eeh_reboot_nb = { | |||
914 | int eeh_init(void) | 975 | int eeh_init(void) |
915 | { | 976 | { |
916 | struct pci_controller *hose, *tmp; | 977 | struct pci_controller *hose, *tmp; |
917 | struct device_node *phb; | 978 | struct pci_dn *pdn; |
918 | static int cnt = 0; | 979 | static int cnt = 0; |
919 | int ret = 0; | 980 | int ret = 0; |
920 | 981 | ||
@@ -949,20 +1010,9 @@ int eeh_init(void) | |||
949 | return ret; | 1010 | return ret; |
950 | 1011 | ||
951 | /* Enable EEH for all adapters */ | 1012 | /* Enable EEH for all adapters */ |
952 | if (eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) { | 1013 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { |
953 | list_for_each_entry_safe(hose, tmp, | 1014 | pdn = hose->pci_data; |
954 | &hose_list, list_node) { | 1015 | traverse_pci_dn(pdn, eeh_ops->probe, NULL); |
955 | phb = hose->dn; | ||
956 | traverse_pci_devices(phb, eeh_ops->of_probe, NULL); | ||
957 | } | ||
958 | } else if (eeh_has_flag(EEH_PROBE_MODE_DEV)) { | ||
959 | list_for_each_entry_safe(hose, tmp, | ||
960 | &hose_list, list_node) | ||
961 | pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL); | ||
962 | } else { | ||
963 | pr_warn("%s: Invalid probe mode %x", | ||
964 | __func__, eeh_subsystem_flags); | ||
965 | return -EINVAL; | ||
966 | } | 1016 | } |
967 | 1017 | ||
968 | /* | 1018 | /* |
@@ -987,8 +1037,8 @@ int eeh_init(void) | |||
987 | core_initcall_sync(eeh_init); | 1037 | core_initcall_sync(eeh_init); |
988 | 1038 | ||
989 | /** | 1039 | /** |
990 | * eeh_add_device_early - Enable EEH for the indicated device_node | 1040 | * eeh_add_device_early - Enable EEH for the indicated device node |
991 | * @dn: device node for which to set up EEH | 1041 | * @pdn: PCI device node for which to set up EEH |
992 | * | 1042 | * |
993 | * This routine must be used to perform EEH initialization for PCI | 1043 | * This routine must be used to perform EEH initialization for PCI |
994 | * devices that were added after system boot (e.g. hotplug, dlpar). | 1044 | * devices that were added after system boot (e.g. hotplug, dlpar). |
@@ -998,44 +1048,41 @@ core_initcall_sync(eeh_init); | |||
998 | * on the CEC architecture, type of the device, on earlier boot | 1048 | * on the CEC architecture, type of the device, on earlier boot |
999 | * command-line arguments & etc. | 1049 | * command-line arguments & etc. |
1000 | */ | 1050 | */ |
1001 | void eeh_add_device_early(struct device_node *dn) | 1051 | void eeh_add_device_early(struct pci_dn *pdn) |
1002 | { | 1052 | { |
1003 | struct pci_controller *phb; | 1053 | struct pci_controller *phb; |
1054 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); | ||
1004 | 1055 | ||
1005 | /* | 1056 | if (!edev) |
1006 | * If we're doing EEH probe based on PCI device, we | ||
1007 | * would delay the probe until late stage because | ||
1008 | * the PCI device isn't available this moment. | ||
1009 | */ | ||
1010 | if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) | ||
1011 | return; | ||
1012 | |||
1013 | if (!of_node_to_eeh_dev(dn)) | ||
1014 | return; | 1057 | return; |
1015 | phb = of_node_to_eeh_dev(dn)->phb; | ||
1016 | 1058 | ||
1017 | /* USB Bus children of PCI devices will not have BUID's */ | 1059 | /* USB Bus children of PCI devices will not have BUID's */ |
1018 | if (NULL == phb || 0 == phb->buid) | 1060 | phb = edev->phb; |
1061 | if (NULL == phb || | ||
1062 | (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid)) | ||
1019 | return; | 1063 | return; |
1020 | 1064 | ||
1021 | eeh_ops->of_probe(dn, NULL); | 1065 | eeh_ops->probe(pdn, NULL); |
1022 | } | 1066 | } |
1023 | 1067 | ||
1024 | /** | 1068 | /** |
1025 | * eeh_add_device_tree_early - Enable EEH for the indicated device | 1069 | * eeh_add_device_tree_early - Enable EEH for the indicated device |
1026 | * @dn: device node | 1070 | * @pdn: PCI device node |
1027 | * | 1071 | * |
1028 | * This routine must be used to perform EEH initialization for the | 1072 | * This routine must be used to perform EEH initialization for the |
1029 | * indicated PCI device that was added after system boot (e.g. | 1073 | * indicated PCI device that was added after system boot (e.g. |
1030 | * hotplug, dlpar). | 1074 | * hotplug, dlpar). |
1031 | */ | 1075 | */ |
1032 | void eeh_add_device_tree_early(struct device_node *dn) | 1076 | void eeh_add_device_tree_early(struct pci_dn *pdn) |
1033 | { | 1077 | { |
1034 | struct device_node *sib; | 1078 | struct pci_dn *n; |
1035 | 1079 | ||
1036 | for_each_child_of_node(dn, sib) | 1080 | if (!pdn) |
1037 | eeh_add_device_tree_early(sib); | 1081 | return; |
1038 | eeh_add_device_early(dn); | 1082 | |
1083 | list_for_each_entry(n, &pdn->child_list, list) | ||
1084 | eeh_add_device_tree_early(n); | ||
1085 | eeh_add_device_early(pdn); | ||
1039 | } | 1086 | } |
1040 | EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); | 1087 | EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); |
1041 | 1088 | ||
@@ -1048,7 +1095,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); | |||
1048 | */ | 1095 | */ |
1049 | void eeh_add_device_late(struct pci_dev *dev) | 1096 | void eeh_add_device_late(struct pci_dev *dev) |
1050 | { | 1097 | { |
1051 | struct device_node *dn; | 1098 | struct pci_dn *pdn; |
1052 | struct eeh_dev *edev; | 1099 | struct eeh_dev *edev; |
1053 | 1100 | ||
1054 | if (!dev || !eeh_enabled()) | 1101 | if (!dev || !eeh_enabled()) |
@@ -1056,8 +1103,8 @@ void eeh_add_device_late(struct pci_dev *dev) | |||
1056 | 1103 | ||
1057 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); | 1104 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); |
1058 | 1105 | ||
1059 | dn = pci_device_to_OF_node(dev); | 1106 | pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); |
1060 | edev = of_node_to_eeh_dev(dn); | 1107 | edev = pdn_to_eeh_dev(pdn); |
1061 | if (edev->pdev == dev) { | 1108 | if (edev->pdev == dev) { |
1062 | pr_debug("EEH: Already referenced !\n"); | 1109 | pr_debug("EEH: Already referenced !\n"); |
1063 | return; | 1110 | return; |
@@ -1089,13 +1136,6 @@ void eeh_add_device_late(struct pci_dev *dev) | |||
1089 | edev->pdev = dev; | 1136 | edev->pdev = dev; |
1090 | dev->dev.archdata.edev = edev; | 1137 | dev->dev.archdata.edev = edev; |
1091 | 1138 | ||
1092 | /* | ||
1093 | * We have to do the EEH probe here because the PCI device | ||
1094 | * hasn't been created yet in the early stage. | ||
1095 | */ | ||
1096 | if (eeh_has_flag(EEH_PROBE_MODE_DEV)) | ||
1097 | eeh_ops->dev_probe(dev, NULL); | ||
1098 | |||
1099 | eeh_addr_cache_insert_dev(dev); | 1139 | eeh_addr_cache_insert_dev(dev); |
1100 | } | 1140 | } |
1101 | 1141 | ||
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c index 07d8a2423a61..eeabeabea49c 100644 --- a/arch/powerpc/kernel/eeh_cache.c +++ b/arch/powerpc/kernel/eeh_cache.c | |||
@@ -171,30 +171,27 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo, | |||
171 | 171 | ||
172 | static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) | 172 | static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) |
173 | { | 173 | { |
174 | struct device_node *dn; | 174 | struct pci_dn *pdn; |
175 | struct eeh_dev *edev; | 175 | struct eeh_dev *edev; |
176 | int i; | 176 | int i; |
177 | 177 | ||
178 | dn = pci_device_to_OF_node(dev); | 178 | pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); |
179 | if (!dn) { | 179 | if (!pdn) { |
180 | pr_warn("PCI: no pci dn found for dev=%s\n", | 180 | pr_warn("PCI: no pci dn found for dev=%s\n", |
181 | pci_name(dev)); | 181 | pci_name(dev)); |
182 | return; | 182 | return; |
183 | } | 183 | } |
184 | 184 | ||
185 | edev = of_node_to_eeh_dev(dn); | 185 | edev = pdn_to_eeh_dev(pdn); |
186 | if (!edev) { | 186 | if (!edev) { |
187 | pr_warn("PCI: no EEH dev found for dn=%s\n", | 187 | pr_warn("PCI: no EEH dev found for %s\n", |
188 | dn->full_name); | 188 | pci_name(dev)); |
189 | return; | 189 | return; |
190 | } | 190 | } |
191 | 191 | ||
192 | /* Skip any devices for which EEH is not enabled. */ | 192 | /* Skip any devices for which EEH is not enabled. */ |
193 | if (!edev->pe) { | 193 | if (!edev->pe) { |
194 | #ifdef DEBUG | 194 | dev_dbg(&dev->dev, "EEH: Skip building address cache\n"); |
195 | pr_info("PCI: skip building address cache for=%s - %s\n", | ||
196 | pci_name(dev), dn->full_name); | ||
197 | #endif | ||
198 | return; | 195 | return; |
199 | } | 196 | } |
200 | 197 | ||
@@ -282,18 +279,18 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev) | |||
282 | */ | 279 | */ |
283 | void eeh_addr_cache_build(void) | 280 | void eeh_addr_cache_build(void) |
284 | { | 281 | { |
285 | struct device_node *dn; | 282 | struct pci_dn *pdn; |
286 | struct eeh_dev *edev; | 283 | struct eeh_dev *edev; |
287 | struct pci_dev *dev = NULL; | 284 | struct pci_dev *dev = NULL; |
288 | 285 | ||
289 | spin_lock_init(&pci_io_addr_cache_root.piar_lock); | 286 | spin_lock_init(&pci_io_addr_cache_root.piar_lock); |
290 | 287 | ||
291 | for_each_pci_dev(dev) { | 288 | for_each_pci_dev(dev) { |
292 | dn = pci_device_to_OF_node(dev); | 289 | pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); |
293 | if (!dn) | 290 | if (!pdn) |
294 | continue; | 291 | continue; |
295 | 292 | ||
296 | edev = of_node_to_eeh_dev(dn); | 293 | edev = pdn_to_eeh_dev(pdn); |
297 | if (!edev) | 294 | if (!edev) |
298 | continue; | 295 | continue; |
299 | 296 | ||
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index e5274ee9a75f..aabba94ff9cb 100644 --- a/arch/powerpc/kernel/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c | |||
@@ -43,13 +43,13 @@ | |||
43 | 43 | ||
44 | /** | 44 | /** |
45 | * eeh_dev_init - Create EEH device according to OF node | 45 | * eeh_dev_init - Create EEH device according to OF node |
46 | * @dn: device node | 46 | * @pdn: PCI device node |
47 | * @data: PHB | 47 | * @data: PHB |
48 | * | 48 | * |
49 | * It will create EEH device according to the given OF node. The function | 49 | * It will create EEH device according to the given OF node. The function |
50 | * might be called by PCI emunation, DR, PHB hotplug. | 50 | * might be called by PCI emunation, DR, PHB hotplug. |
51 | */ | 51 | */ |
52 | void *eeh_dev_init(struct device_node *dn, void *data) | 52 | void *eeh_dev_init(struct pci_dn *pdn, void *data) |
53 | { | 53 | { |
54 | struct pci_controller *phb = data; | 54 | struct pci_controller *phb = data; |
55 | struct eeh_dev *edev; | 55 | struct eeh_dev *edev; |
@@ -63,8 +63,8 @@ void *eeh_dev_init(struct device_node *dn, void *data) | |||
63 | } | 63 | } |
64 | 64 | ||
65 | /* Associate EEH device with OF node */ | 65 | /* Associate EEH device with OF node */ |
66 | PCI_DN(dn)->edev = edev; | 66 | pdn->edev = edev; |
67 | edev->dn = dn; | 67 | edev->pdn = pdn; |
68 | edev->phb = phb; | 68 | edev->phb = phb; |
69 | INIT_LIST_HEAD(&edev->list); | 69 | INIT_LIST_HEAD(&edev->list); |
70 | 70 | ||
@@ -80,16 +80,16 @@ void *eeh_dev_init(struct device_node *dn, void *data) | |||
80 | */ | 80 | */ |
81 | void eeh_dev_phb_init_dynamic(struct pci_controller *phb) | 81 | void eeh_dev_phb_init_dynamic(struct pci_controller *phb) |
82 | { | 82 | { |
83 | struct device_node *dn = phb->dn; | 83 | struct pci_dn *root = phb->pci_data; |
84 | 84 | ||
85 | /* EEH PE for PHB */ | 85 | /* EEH PE for PHB */ |
86 | eeh_phb_pe_create(phb); | 86 | eeh_phb_pe_create(phb); |
87 | 87 | ||
88 | /* EEH device for PHB */ | 88 | /* EEH device for PHB */ |
89 | eeh_dev_init(dn, phb); | 89 | eeh_dev_init(root, phb); |
90 | 90 | ||
91 | /* EEH devices for children OF nodes */ | 91 | /* EEH devices for children OF nodes */ |
92 | traverse_pci_devices(dn, eeh_dev_init, phb); | 92 | traverse_pci_dn(root, eeh_dev_init, phb); |
93 | } | 93 | } |
94 | 94 | ||
95 | /** | 95 | /** |
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index d099540c0f56..24768ff3cb73 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -83,28 +83,6 @@ static inline void eeh_pcid_put(struct pci_dev *pdev) | |||
83 | module_put(pdev->driver->driver.owner); | 83 | module_put(pdev->driver->driver.owner); |
84 | } | 84 | } |
85 | 85 | ||
86 | #if 0 | ||
87 | static void print_device_node_tree(struct pci_dn *pdn, int dent) | ||
88 | { | ||
89 | int i; | ||
90 | struct device_node *pc; | ||
91 | |||
92 | if (!pdn) | ||
93 | return; | ||
94 | for (i = 0; i < dent; i++) | ||
95 | printk(" "); | ||
96 | printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n", | ||
97 | pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr, | ||
98 | pdn->eeh_pe_config_addr, pdn->node->full_name); | ||
99 | dent += 3; | ||
100 | pc = pdn->node->child; | ||
101 | while (pc) { | ||
102 | print_device_node_tree(PCI_DN(pc), dent); | ||
103 | pc = pc->sibling; | ||
104 | } | ||
105 | } | ||
106 | #endif | ||
107 | |||
108 | /** | 86 | /** |
109 | * eeh_disable_irq - Disable interrupt for the recovering device | 87 | * eeh_disable_irq - Disable interrupt for the recovering device |
110 | * @dev: PCI device | 88 | * @dev: PCI device |
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 1e4946c36f9e..35f0b62259bb 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c | |||
@@ -291,27 +291,25 @@ struct eeh_pe *eeh_pe_get(struct eeh_dev *edev) | |||
291 | */ | 291 | */ |
292 | static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) | 292 | static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) |
293 | { | 293 | { |
294 | struct device_node *dn; | ||
295 | struct eeh_dev *parent; | 294 | struct eeh_dev *parent; |
295 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
296 | 296 | ||
297 | /* | 297 | /* |
298 | * It might have the case for the indirect parent | 298 | * It might have the case for the indirect parent |
299 | * EEH device already having associated PE, but | 299 | * EEH device already having associated PE, but |
300 | * the direct parent EEH device doesn't have yet. | 300 | * the direct parent EEH device doesn't have yet. |
301 | */ | 301 | */ |
302 | dn = edev->dn->parent; | 302 | pdn = pdn ? pdn->parent : NULL; |
303 | while (dn) { | 303 | while (pdn) { |
304 | /* We're poking out of PCI territory */ | 304 | /* We're poking out of PCI territory */ |
305 | if (!PCI_DN(dn)) return NULL; | 305 | parent = pdn_to_eeh_dev(pdn); |
306 | 306 | if (!parent) | |
307 | parent = of_node_to_eeh_dev(dn); | 307 | return NULL; |
308 | /* We're poking out of PCI territory */ | ||
309 | if (!parent) return NULL; | ||
310 | 308 | ||
311 | if (parent->pe) | 309 | if (parent->pe) |
312 | return parent->pe; | 310 | return parent->pe; |
313 | 311 | ||
314 | dn = dn->parent; | 312 | pdn = pdn->parent; |
315 | } | 313 | } |
316 | 314 | ||
317 | return NULL; | 315 | return NULL; |
@@ -330,6 +328,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) | |||
330 | { | 328 | { |
331 | struct eeh_pe *pe, *parent; | 329 | struct eeh_pe *pe, *parent; |
332 | 330 | ||
331 | /* Check if the PE number is valid */ | ||
332 | if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) { | ||
333 | pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n", | ||
334 | __func__, edev->config_addr, edev->phb->global_number); | ||
335 | return -EINVAL; | ||
336 | } | ||
337 | |||
333 | /* | 338 | /* |
334 | * Search the PE has been existing or not according | 339 | * Search the PE has been existing or not according |
335 | * to the PE address. If that has been existing, the | 340 | * to the PE address. If that has been existing, the |
@@ -338,21 +343,18 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) | |||
338 | */ | 343 | */ |
339 | pe = eeh_pe_get(edev); | 344 | pe = eeh_pe_get(edev); |
340 | if (pe && !(pe->type & EEH_PE_INVALID)) { | 345 | if (pe && !(pe->type & EEH_PE_INVALID)) { |
341 | if (!edev->pe_config_addr) { | ||
342 | pr_err("%s: PE with addr 0x%x already exists\n", | ||
343 | __func__, edev->config_addr); | ||
344 | return -EEXIST; | ||
345 | } | ||
346 | |||
347 | /* Mark the PE as type of PCI bus */ | 346 | /* Mark the PE as type of PCI bus */ |
348 | pe->type = EEH_PE_BUS; | 347 | pe->type = EEH_PE_BUS; |
349 | edev->pe = pe; | 348 | edev->pe = pe; |
350 | 349 | ||
351 | /* Put the edev to PE */ | 350 | /* Put the edev to PE */ |
352 | list_add_tail(&edev->list, &pe->edevs); | 351 | list_add_tail(&edev->list, &pe->edevs); |
353 | pr_debug("EEH: Add %s to Bus PE#%x\n", | 352 | pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n", |
354 | edev->dn->full_name, pe->addr); | 353 | edev->phb->global_number, |
355 | 354 | edev->config_addr >> 8, | |
355 | PCI_SLOT(edev->config_addr & 0xFF), | ||
356 | PCI_FUNC(edev->config_addr & 0xFF), | ||
357 | pe->addr); | ||
356 | return 0; | 358 | return 0; |
357 | } else if (pe && (pe->type & EEH_PE_INVALID)) { | 359 | } else if (pe && (pe->type & EEH_PE_INVALID)) { |
358 | list_add_tail(&edev->list, &pe->edevs); | 360 | list_add_tail(&edev->list, &pe->edevs); |
@@ -368,9 +370,14 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) | |||
368 | parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); | 370 | parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); |
369 | parent = parent->parent; | 371 | parent = parent->parent; |
370 | } | 372 | } |
371 | pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", | ||
372 | edev->dn->full_name, pe->addr, pe->parent->addr); | ||
373 | 373 | ||
374 | pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device " | ||
375 | "PE#%x, Parent PE#%x\n", | ||
376 | edev->phb->global_number, | ||
377 | edev->config_addr >> 8, | ||
378 | PCI_SLOT(edev->config_addr & 0xFF), | ||
379 | PCI_FUNC(edev->config_addr & 0xFF), | ||
380 | pe->addr, pe->parent->addr); | ||
374 | return 0; | 381 | return 0; |
375 | } | 382 | } |
376 | 383 | ||
@@ -409,8 +416,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) | |||
409 | list_add_tail(&pe->child, &parent->child_list); | 416 | list_add_tail(&pe->child, &parent->child_list); |
410 | list_add_tail(&edev->list, &pe->edevs); | 417 | list_add_tail(&edev->list, &pe->edevs); |
411 | edev->pe = pe; | 418 | edev->pe = pe; |
412 | pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", | 419 | pr_debug("EEH: Add %04x:%02x:%02x.%01x to " |
413 | edev->dn->full_name, pe->addr, pe->parent->addr); | 420 | "Device PE#%x, Parent PE#%x\n", |
421 | edev->phb->global_number, | ||
422 | edev->config_addr >> 8, | ||
423 | PCI_SLOT(edev->config_addr & 0xFF), | ||
424 | PCI_FUNC(edev->config_addr & 0xFF), | ||
425 | pe->addr, pe->parent->addr); | ||
414 | 426 | ||
415 | return 0; | 427 | return 0; |
416 | } | 428 | } |
@@ -430,8 +442,11 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev) | |||
430 | int cnt; | 442 | int cnt; |
431 | 443 | ||
432 | if (!edev->pe) { | 444 | if (!edev->pe) { |
433 | pr_debug("%s: No PE found for EEH device %s\n", | 445 | pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n", |
434 | __func__, edev->dn->full_name); | 446 | __func__, edev->phb->global_number, |
447 | edev->config_addr >> 8, | ||
448 | PCI_SLOT(edev->config_addr & 0xFF), | ||
449 | PCI_FUNC(edev->config_addr & 0xFF)); | ||
435 | return -EEXIST; | 450 | return -EEXIST; |
436 | } | 451 | } |
437 | 452 | ||
@@ -653,9 +668,9 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state) | |||
653 | * blocked on normal path during the stage. So we need utilize | 668 | * blocked on normal path during the stage. So we need utilize |
654 | * eeh operations, which is always permitted. | 669 | * eeh operations, which is always permitted. |
655 | */ | 670 | */ |
656 | static void eeh_bridge_check_link(struct eeh_dev *edev, | 671 | static void eeh_bridge_check_link(struct eeh_dev *edev) |
657 | struct device_node *dn) | ||
658 | { | 672 | { |
673 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
659 | int cap; | 674 | int cap; |
660 | uint32_t val; | 675 | uint32_t val; |
661 | int timeout = 0; | 676 | int timeout = 0; |
@@ -675,32 +690,32 @@ static void eeh_bridge_check_link(struct eeh_dev *edev, | |||
675 | 690 | ||
676 | /* Check slot status */ | 691 | /* Check slot status */ |
677 | cap = edev->pcie_cap; | 692 | cap = edev->pcie_cap; |
678 | eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val); | 693 | eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val); |
679 | if (!(val & PCI_EXP_SLTSTA_PDS)) { | 694 | if (!(val & PCI_EXP_SLTSTA_PDS)) { |
680 | pr_debug(" No card in the slot (0x%04x) !\n", val); | 695 | pr_debug(" No card in the slot (0x%04x) !\n", val); |
681 | return; | 696 | return; |
682 | } | 697 | } |
683 | 698 | ||
684 | /* Check power status if we have the capability */ | 699 | /* Check power status if we have the capability */ |
685 | eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val); | 700 | eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val); |
686 | if (val & PCI_EXP_SLTCAP_PCP) { | 701 | if (val & PCI_EXP_SLTCAP_PCP) { |
687 | eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val); | 702 | eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val); |
688 | if (val & PCI_EXP_SLTCTL_PCC) { | 703 | if (val & PCI_EXP_SLTCTL_PCC) { |
689 | pr_debug(" In power-off state, power it on ...\n"); | 704 | pr_debug(" In power-off state, power it on ...\n"); |
690 | val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC); | 705 | val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC); |
691 | val |= (0x0100 & PCI_EXP_SLTCTL_PIC); | 706 | val |= (0x0100 & PCI_EXP_SLTCTL_PIC); |
692 | eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val); | 707 | eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val); |
693 | msleep(2 * 1000); | 708 | msleep(2 * 1000); |
694 | } | 709 | } |
695 | } | 710 | } |
696 | 711 | ||
697 | /* Enable link */ | 712 | /* Enable link */ |
698 | eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val); | 713 | eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val); |
699 | val &= ~PCI_EXP_LNKCTL_LD; | 714 | val &= ~PCI_EXP_LNKCTL_LD; |
700 | eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val); | 715 | eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val); |
701 | 716 | ||
702 | /* Check link */ | 717 | /* Check link */ |
703 | eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val); | 718 | eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val); |
704 | if (!(val & PCI_EXP_LNKCAP_DLLLARC)) { | 719 | if (!(val & PCI_EXP_LNKCAP_DLLLARC)) { |
705 | pr_debug(" No link reporting capability (0x%08x) \n", val); | 720 | pr_debug(" No link reporting capability (0x%08x) \n", val); |
706 | msleep(1000); | 721 | msleep(1000); |
@@ -713,7 +728,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev, | |||
713 | msleep(20); | 728 | msleep(20); |
714 | timeout += 20; | 729 | timeout += 20; |
715 | 730 | ||
716 | eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val); | 731 | eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val); |
717 | if (val & PCI_EXP_LNKSTA_DLLLA) | 732 | if (val & PCI_EXP_LNKSTA_DLLLA) |
718 | break; | 733 | break; |
719 | } | 734 | } |
@@ -728,9 +743,9 @@ static void eeh_bridge_check_link(struct eeh_dev *edev, | |||
728 | #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) | 743 | #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) |
729 | #define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) | 744 | #define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) |
730 | 745 | ||
731 | static void eeh_restore_bridge_bars(struct eeh_dev *edev, | 746 | static void eeh_restore_bridge_bars(struct eeh_dev *edev) |
732 | struct device_node *dn) | ||
733 | { | 747 | { |
748 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
734 | int i; | 749 | int i; |
735 | 750 | ||
736 | /* | 751 | /* |
@@ -738,49 +753,49 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev, | |||
738 | * Bus numbers and windows: 0x18 - 0x30 | 753 | * Bus numbers and windows: 0x18 - 0x30 |
739 | */ | 754 | */ |
740 | for (i = 4; i < 13; i++) | 755 | for (i = 4; i < 13; i++) |
741 | eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); | 756 | eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]); |
742 | /* Rom: 0x38 */ | 757 | /* Rom: 0x38 */ |
743 | eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]); | 758 | eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]); |
744 | 759 | ||
745 | /* Cache line & Latency timer: 0xC 0xD */ | 760 | /* Cache line & Latency timer: 0xC 0xD */ |
746 | eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, | 761 | eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1, |
747 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); | 762 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); |
748 | eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, | 763 | eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1, |
749 | SAVED_BYTE(PCI_LATENCY_TIMER)); | 764 | SAVED_BYTE(PCI_LATENCY_TIMER)); |
750 | /* Max latency, min grant, interrupt ping and line: 0x3C */ | 765 | /* Max latency, min grant, interrupt ping and line: 0x3C */ |
751 | eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); | 766 | eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); |
752 | 767 | ||
753 | /* PCI Command: 0x4 */ | 768 | /* PCI Command: 0x4 */ |
754 | eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]); | 769 | eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); |
755 | 770 | ||
756 | /* Check the PCIe link is ready */ | 771 | /* Check the PCIe link is ready */ |
757 | eeh_bridge_check_link(edev, dn); | 772 | eeh_bridge_check_link(edev); |
758 | } | 773 | } |
759 | 774 | ||
760 | static void eeh_restore_device_bars(struct eeh_dev *edev, | 775 | static void eeh_restore_device_bars(struct eeh_dev *edev) |
761 | struct device_node *dn) | ||
762 | { | 776 | { |
777 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
763 | int i; | 778 | int i; |
764 | u32 cmd; | 779 | u32 cmd; |
765 | 780 | ||
766 | for (i = 4; i < 10; i++) | 781 | for (i = 4; i < 10; i++) |
767 | eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); | 782 | eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]); |
768 | /* 12 == Expansion ROM Address */ | 783 | /* 12 == Expansion ROM Address */ |
769 | eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]); | 784 | eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]); |
770 | 785 | ||
771 | eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, | 786 | eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1, |
772 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); | 787 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); |
773 | eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, | 788 | eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1, |
774 | SAVED_BYTE(PCI_LATENCY_TIMER)); | 789 | SAVED_BYTE(PCI_LATENCY_TIMER)); |
775 | 790 | ||
776 | /* max latency, min grant, interrupt pin and line */ | 791 | /* max latency, min grant, interrupt pin and line */ |
777 | eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); | 792 | eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); |
778 | 793 | ||
779 | /* | 794 | /* |
780 | * Restore PERR & SERR bits, some devices require it, | 795 | * Restore PERR & SERR bits, some devices require it, |
781 | * don't touch the other command bits | 796 | * don't touch the other command bits |
782 | */ | 797 | */ |
783 | eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd); | 798 | eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd); |
784 | if (edev->config_space[1] & PCI_COMMAND_PARITY) | 799 | if (edev->config_space[1] & PCI_COMMAND_PARITY) |
785 | cmd |= PCI_COMMAND_PARITY; | 800 | cmd |= PCI_COMMAND_PARITY; |
786 | else | 801 | else |
@@ -789,7 +804,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev, | |||
789 | cmd |= PCI_COMMAND_SERR; | 804 | cmd |= PCI_COMMAND_SERR; |
790 | else | 805 | else |
791 | cmd &= ~PCI_COMMAND_SERR; | 806 | cmd &= ~PCI_COMMAND_SERR; |
792 | eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd); | 807 | eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd); |
793 | } | 808 | } |
794 | 809 | ||
795 | /** | 810 | /** |
@@ -804,16 +819,16 @@ static void eeh_restore_device_bars(struct eeh_dev *edev, | |||
804 | static void *eeh_restore_one_device_bars(void *data, void *flag) | 819 | static void *eeh_restore_one_device_bars(void *data, void *flag) |
805 | { | 820 | { |
806 | struct eeh_dev *edev = (struct eeh_dev *)data; | 821 | struct eeh_dev *edev = (struct eeh_dev *)data; |
807 | struct device_node *dn = eeh_dev_to_of_node(edev); | 822 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); |
808 | 823 | ||
809 | /* Do special restore for bridges */ | 824 | /* Do special restore for bridges */ |
810 | if (edev->mode & EEH_DEV_BRIDGE) | 825 | if (edev->mode & EEH_DEV_BRIDGE) |
811 | eeh_restore_bridge_bars(edev, dn); | 826 | eeh_restore_bridge_bars(edev); |
812 | else | 827 | else |
813 | eeh_restore_device_bars(edev, dn); | 828 | eeh_restore_device_bars(edev); |
814 | 829 | ||
815 | if (eeh_ops->restore_config) | 830 | if (eeh_ops->restore_config && pdn) |
816 | eeh_ops->restore_config(dn); | 831 | eeh_ops->restore_config(pdn); |
817 | 832 | ||
818 | return NULL; | 833 | return NULL; |
819 | } | 834 | } |
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 2f35a72642c6..b60a67d92ebd 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c | |||
@@ -72,7 +72,7 @@ static int of_pci_phb_probe(struct platform_device *dev) | |||
72 | 72 | ||
73 | /* Register devices with EEH */ | 73 | /* Register devices with EEH */ |
74 | if (dev->dev.of_node->child) | 74 | if (dev->dev.of_node->child) |
75 | eeh_add_device_tree_early(dev->dev.of_node); | 75 | eeh_add_device_tree_early(PCI_DN(dev->dev.of_node)); |
76 | 76 | ||
77 | /* Scan the bus */ | 77 | /* Scan the bus */ |
78 | pcibios_scan_phb(phb); | 78 | pcibios_scan_phb(phb); |
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c index 5b789177aa29..18d9575729a3 100644 --- a/arch/powerpc/kernel/pci-hotplug.c +++ b/arch/powerpc/kernel/pci-hotplug.c | |||
@@ -75,7 +75,7 @@ void pcibios_add_pci_devices(struct pci_bus * bus) | |||
75 | struct pci_dev *dev; | 75 | struct pci_dev *dev; |
76 | struct device_node *dn = pci_bus_to_OF_node(bus); | 76 | struct device_node *dn = pci_bus_to_OF_node(bus); |
77 | 77 | ||
78 | eeh_add_device_tree_early(dn); | 78 | eeh_add_device_tree_early(PCI_DN(dn)); |
79 | 79 | ||
80 | mode = PCI_PROBE_NORMAL; | 80 | mode = PCI_PROBE_NORMAL; |
81 | if (ppc_md.pci_probe_mode) | 81 | if (ppc_md.pci_probe_mode) |
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 83df3075d3df..65b98367005c 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c | |||
@@ -32,12 +32,108 @@ | |||
32 | #include <asm/ppc-pci.h> | 32 | #include <asm/ppc-pci.h> |
33 | #include <asm/firmware.h> | 33 | #include <asm/firmware.h> |
34 | 34 | ||
35 | /* | ||
36 | * The function is used to find the firmware data of one | ||
37 | * specific PCI device, which is attached to the indicated | ||
38 | * PCI bus. For VFs, their firmware data is linked to that | ||
39 | * one of PF's bridge. For other devices, their firmware | ||
40 | * data is linked to that of their bridge. | ||
41 | */ | ||
42 | static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus) | ||
43 | { | ||
44 | struct pci_bus *pbus; | ||
45 | struct device_node *dn; | ||
46 | struct pci_dn *pdn; | ||
47 | |||
48 | /* | ||
49 | * We probably have virtual bus which doesn't | ||
50 | * have associated bridge. | ||
51 | */ | ||
52 | pbus = bus; | ||
53 | while (pbus) { | ||
54 | if (pci_is_root_bus(pbus) || pbus->self) | ||
55 | break; | ||
56 | |||
57 | pbus = pbus->parent; | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Except virtual bus, all PCI buses should | ||
62 | * have device nodes. | ||
63 | */ | ||
64 | dn = pci_bus_to_OF_node(pbus); | ||
65 | pdn = dn ? PCI_DN(dn) : NULL; | ||
66 | |||
67 | return pdn; | ||
68 | } | ||
69 | |||
70 | struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus, | ||
71 | int devfn) | ||
72 | { | ||
73 | struct device_node *dn = NULL; | ||
74 | struct pci_dn *parent, *pdn; | ||
75 | struct pci_dev *pdev = NULL; | ||
76 | |||
77 | /* Fast path: fetch from PCI device */ | ||
78 | list_for_each_entry(pdev, &bus->devices, bus_list) { | ||
79 | if (pdev->devfn == devfn) { | ||
80 | if (pdev->dev.archdata.pci_data) | ||
81 | return pdev->dev.archdata.pci_data; | ||
82 | |||
83 | dn = pci_device_to_OF_node(pdev); | ||
84 | break; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* Fast path: fetch from device node */ | ||
89 | pdn = dn ? PCI_DN(dn) : NULL; | ||
90 | if (pdn) | ||
91 | return pdn; | ||
92 | |||
93 | /* Slow path: fetch from firmware data hierarchy */ | ||
94 | parent = pci_bus_to_pdn(bus); | ||
95 | if (!parent) | ||
96 | return NULL; | ||
97 | |||
98 | list_for_each_entry(pdn, &parent->child_list, list) { | ||
99 | if (pdn->busno == bus->number && | ||
100 | pdn->devfn == devfn) | ||
101 | return pdn; | ||
102 | } | ||
103 | |||
104 | return NULL; | ||
105 | } | ||
106 | |||
35 | struct pci_dn *pci_get_pdn(struct pci_dev *pdev) | 107 | struct pci_dn *pci_get_pdn(struct pci_dev *pdev) |
36 | { | 108 | { |
37 | struct device_node *dn = pci_device_to_OF_node(pdev); | 109 | struct device_node *dn; |
38 | if (!dn) | 110 | struct pci_dn *parent, *pdn; |
111 | |||
112 | /* Search device directly */ | ||
113 | if (pdev->dev.archdata.pci_data) | ||
114 | return pdev->dev.archdata.pci_data; | ||
115 | |||
116 | /* Check device node */ | ||
117 | dn = pci_device_to_OF_node(pdev); | ||
118 | pdn = dn ? PCI_DN(dn) : NULL; | ||
119 | if (pdn) | ||
120 | return pdn; | ||
121 | |||
122 | /* | ||
123 | * VFs don't have device nodes. We hook their | ||
124 | * firmware data to PF's bridge. | ||
125 | */ | ||
126 | parent = pci_bus_to_pdn(pdev->bus); | ||
127 | if (!parent) | ||
39 | return NULL; | 128 | return NULL; |
40 | return PCI_DN(dn); | 129 | |
130 | list_for_each_entry(pdn, &parent->child_list, list) { | ||
131 | if (pdn->busno == pdev->bus->number && | ||
132 | pdn->devfn == pdev->devfn) | ||
133 | return pdn; | ||
134 | } | ||
135 | |||
136 | return NULL; | ||
41 | } | 137 | } |
42 | 138 | ||
43 | /* | 139 | /* |
@@ -49,6 +145,7 @@ void *update_dn_pci_info(struct device_node *dn, void *data) | |||
49 | struct pci_controller *phb = data; | 145 | struct pci_controller *phb = data; |
50 | const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL); | 146 | const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL); |
51 | const __be32 *regs; | 147 | const __be32 *regs; |
148 | struct device_node *parent; | ||
52 | struct pci_dn *pdn; | 149 | struct pci_dn *pdn; |
53 | 150 | ||
54 | pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); | 151 | pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); |
@@ -69,7 +166,25 @@ void *update_dn_pci_info(struct device_node *dn, void *data) | |||
69 | pdn->devfn = (addr >> 8) & 0xff; | 166 | pdn->devfn = (addr >> 8) & 0xff; |
70 | } | 167 | } |
71 | 168 | ||
169 | /* vendor/device IDs and class code */ | ||
170 | regs = of_get_property(dn, "vendor-id", NULL); | ||
171 | pdn->vendor_id = regs ? of_read_number(regs, 1) : 0; | ||
172 | regs = of_get_property(dn, "device-id", NULL); | ||
173 | pdn->device_id = regs ? of_read_number(regs, 1) : 0; | ||
174 | regs = of_get_property(dn, "class-code", NULL); | ||
175 | pdn->class_code = regs ? of_read_number(regs, 1) : 0; | ||
176 | |||
177 | /* Extended config space */ | ||
72 | pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1); | 178 | pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1); |
179 | |||
180 | /* Attach to parent node */ | ||
181 | INIT_LIST_HEAD(&pdn->child_list); | ||
182 | INIT_LIST_HEAD(&pdn->list); | ||
183 | parent = of_get_parent(dn); | ||
184 | pdn->parent = parent ? PCI_DN(parent) : NULL; | ||
185 | if (pdn->parent) | ||
186 | list_add_tail(&pdn->list, &pdn->parent->child_list); | ||
187 | |||
73 | return NULL; | 188 | return NULL; |
74 | } | 189 | } |
75 | 190 | ||
@@ -131,6 +246,46 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, | |||
131 | return NULL; | 246 | return NULL; |
132 | } | 247 | } |
133 | 248 | ||
249 | static struct pci_dn *pci_dn_next_one(struct pci_dn *root, | ||
250 | struct pci_dn *pdn) | ||
251 | { | ||
252 | struct list_head *next = pdn->child_list.next; | ||
253 | |||
254 | if (next != &pdn->child_list) | ||
255 | return list_entry(next, struct pci_dn, list); | ||
256 | |||
257 | while (1) { | ||
258 | if (pdn == root) | ||
259 | return NULL; | ||
260 | |||
261 | next = pdn->list.next; | ||
262 | if (next != &pdn->parent->child_list) | ||
263 | break; | ||
264 | |||
265 | pdn = pdn->parent; | ||
266 | } | ||
267 | |||
268 | return list_entry(next, struct pci_dn, list); | ||
269 | } | ||
270 | |||
271 | void *traverse_pci_dn(struct pci_dn *root, | ||
272 | void *(*fn)(struct pci_dn *, void *), | ||
273 | void *data) | ||
274 | { | ||
275 | struct pci_dn *pdn = root; | ||
276 | void *ret; | ||
277 | |||
278 | /* Only scan the child nodes */ | ||
279 | for (pdn = pci_dn_next_one(root, pdn); pdn; | ||
280 | pdn = pci_dn_next_one(root, pdn)) { | ||
281 | ret = fn(pdn, data); | ||
282 | if (ret) | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | return NULL; | ||
287 | } | ||
288 | |||
134 | /** | 289 | /** |
135 | * pci_devs_phb_init_dynamic - setup pci devices under this PHB | 290 | * pci_devs_phb_init_dynamic - setup pci devices under this PHB |
136 | * phb: pci-to-host bridge (top-level bridge connecting to cpu) | 291 | * phb: pci-to-host bridge (top-level bridge connecting to cpu) |
@@ -147,8 +302,12 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb) | |||
147 | /* PHB nodes themselves must not match */ | 302 | /* PHB nodes themselves must not match */ |
148 | update_dn_pci_info(dn, phb); | 303 | update_dn_pci_info(dn, phb); |
149 | pdn = dn->data; | 304 | pdn = dn->data; |
150 | if (pdn) | 305 | if (pdn) { |
151 | pdn->devfn = pdn->busno = -1; | 306 | pdn->devfn = pdn->busno = -1; |
307 | pdn->vendor_id = pdn->device_id = pdn->class_code = 0; | ||
308 | pdn->phb = phb; | ||
309 | phb->pci_data = pdn; | ||
310 | } | ||
152 | 311 | ||
153 | /* Update dn->phb ptrs for new phb and children devices */ | 312 | /* Update dn->phb ptrs for new phb and children devices */ |
154 | traverse_pci_devices(dn, update_dn_pci_info, phb); | 313 | traverse_pci_devices(dn, update_dn_pci_info, phb); |
@@ -171,3 +330,16 @@ void __init pci_devs_phb_init(void) | |||
171 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) | 330 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) |
172 | pci_devs_phb_init_dynamic(phb); | 331 | pci_devs_phb_init_dynamic(phb); |
173 | } | 332 | } |
333 | |||
334 | static void pci_dev_pdn_setup(struct pci_dev *pdev) | ||
335 | { | ||
336 | struct pci_dn *pdn; | ||
337 | |||
338 | if (pdev->dev.archdata.pci_data) | ||
339 | return; | ||
340 | |||
341 | /* Setup the fast path */ | ||
342 | pdn = pci_get_pdn(pdev); | ||
343 | pdev->dev.archdata.pci_data = pdn; | ||
344 | } | ||
345 | DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup); | ||
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index e6245e9c7d8d..7122dfece393 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -305,7 +305,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, | |||
305 | const __be32 *reg; | 305 | const __be32 *reg; |
306 | int reglen, devfn; | 306 | int reglen, devfn; |
307 | #ifdef CONFIG_EEH | 307 | #ifdef CONFIG_EEH |
308 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | 308 | struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn)); |
309 | #endif | 309 | #endif |
310 | 310 | ||
311 | pr_debug(" * %s\n", dn->full_name); | 311 | pr_debug(" * %s\n", dn->full_name); |
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index ce230da2c015..af29df2517f7 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c | |||
@@ -113,7 +113,7 @@ static int rtas_pci_read_config(struct pci_bus *bus, | |||
113 | 113 | ||
114 | ret = rtas_read_config(pdn, where, size, val); | 114 | ret = rtas_read_config(pdn, where, size, val); |
115 | if (*val == EEH_IO_ERROR_VALUE(size) && | 115 | if (*val == EEH_IO_ERROR_VALUE(size) && |
116 | eeh_dev_check_failure(of_node_to_eeh_dev(dn))) | 116 | eeh_dev_check_failure(pdn_to_eeh_dev(pdn))) |
117 | return PCIBIOS_DEVICE_NOT_FOUND; | 117 | return PCIBIOS_DEVICE_NOT_FOUND; |
118 | 118 | ||
119 | return ret; | 119 | return ret; |
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 6f3c5d33c3af..33e44f37212f 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile | |||
@@ -5,7 +5,7 @@ obj-y += opal-msglog.o opal-hmi.o opal-power.o | |||
5 | 5 | ||
6 | obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o | 6 | obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o |
7 | obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o | 7 | obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o |
8 | obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o | 8 | obj-$(CONFIG_EEH) += eeh-powernv.o |
9 | obj-$(CONFIG_PPC_SCOM) += opal-xscom.o | 9 | obj-$(CONFIG_PPC_SCOM) += opal-xscom.o |
10 | obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o | 10 | obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o |
11 | obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o | 11 | obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o |
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c deleted file mode 100644 index 2809c9895288..000000000000 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ /dev/null | |||
@@ -1,1149 +0,0 @@ | |||
1 | /* | ||
2 | * The file intends to implement the functions needed by EEH, which is | ||
3 | * built on IODA compliant chip. Actually, lots of functions related | ||
4 | * to EEH would be built based on the OPAL APIs. | ||
5 | * | ||
6 | * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/debugfs.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/irq.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/msi.h> | ||
20 | #include <linux/notifier.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/string.h> | ||
23 | |||
24 | #include <asm/eeh.h> | ||
25 | #include <asm/eeh_event.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/iommu.h> | ||
28 | #include <asm/msi_bitmap.h> | ||
29 | #include <asm/opal.h> | ||
30 | #include <asm/pci-bridge.h> | ||
31 | #include <asm/ppc-pci.h> | ||
32 | #include <asm/tce.h> | ||
33 | |||
34 | #include "powernv.h" | ||
35 | #include "pci.h" | ||
36 | |||
37 | static int ioda_eeh_nb_init = 0; | ||
38 | |||
39 | static int ioda_eeh_event(struct notifier_block *nb, | ||
40 | unsigned long events, void *change) | ||
41 | { | ||
42 | uint64_t changed_evts = (uint64_t)change; | ||
43 | |||
44 | /* | ||
45 | * We simply send special EEH event if EEH has | ||
46 | * been enabled, or clear pending events in | ||
47 | * case that we enable EEH soon | ||
48 | */ | ||
49 | if (!(changed_evts & OPAL_EVENT_PCI_ERROR) || | ||
50 | !(events & OPAL_EVENT_PCI_ERROR)) | ||
51 | return 0; | ||
52 | |||
53 | if (eeh_enabled()) | ||
54 | eeh_send_failure_event(NULL); | ||
55 | else | ||
56 | opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static struct notifier_block ioda_eeh_nb = { | ||
62 | .notifier_call = ioda_eeh_event, | ||
63 | .next = NULL, | ||
64 | .priority = 0 | ||
65 | }; | ||
66 | |||
67 | #ifdef CONFIG_DEBUG_FS | ||
68 | static ssize_t ioda_eeh_ei_write(struct file *filp, | ||
69 | const char __user *user_buf, | ||
70 | size_t count, loff_t *ppos) | ||
71 | { | ||
72 | struct pci_controller *hose = filp->private_data; | ||
73 | struct pnv_phb *phb = hose->private_data; | ||
74 | struct eeh_dev *edev; | ||
75 | struct eeh_pe *pe; | ||
76 | int pe_no, type, func; | ||
77 | unsigned long addr, mask; | ||
78 | char buf[50]; | ||
79 | int ret; | ||
80 | |||
81 | if (!phb->eeh_ops || !phb->eeh_ops->err_inject) | ||
82 | return -ENXIO; | ||
83 | |||
84 | ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); | ||
85 | if (!ret) | ||
86 | return -EFAULT; | ||
87 | |||
88 | /* Retrieve parameters */ | ||
89 | ret = sscanf(buf, "%x:%x:%x:%lx:%lx", | ||
90 | &pe_no, &type, &func, &addr, &mask); | ||
91 | if (ret != 5) | ||
92 | return -EINVAL; | ||
93 | |||
94 | /* Retrieve PE */ | ||
95 | edev = kzalloc(sizeof(*edev), GFP_KERNEL); | ||
96 | if (!edev) | ||
97 | return -ENOMEM; | ||
98 | edev->phb = hose; | ||
99 | edev->pe_config_addr = pe_no; | ||
100 | pe = eeh_pe_get(edev); | ||
101 | kfree(edev); | ||
102 | if (!pe) | ||
103 | return -ENODEV; | ||
104 | |||
105 | /* Do error injection */ | ||
106 | ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask); | ||
107 | return ret < 0 ? ret : count; | ||
108 | } | ||
109 | |||
110 | static const struct file_operations ioda_eeh_ei_fops = { | ||
111 | .open = simple_open, | ||
112 | .llseek = no_llseek, | ||
113 | .write = ioda_eeh_ei_write, | ||
114 | }; | ||
115 | |||
116 | static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val) | ||
117 | { | ||
118 | struct pci_controller *hose = data; | ||
119 | struct pnv_phb *phb = hose->private_data; | ||
120 | |||
121 | out_be64(phb->regs + offset, val); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val) | ||
126 | { | ||
127 | struct pci_controller *hose = data; | ||
128 | struct pnv_phb *phb = hose->private_data; | ||
129 | |||
130 | *val = in_be64(phb->regs + offset); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int ioda_eeh_outb_dbgfs_set(void *data, u64 val) | ||
135 | { | ||
136 | return ioda_eeh_dbgfs_set(data, 0xD10, val); | ||
137 | } | ||
138 | |||
139 | static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val) | ||
140 | { | ||
141 | return ioda_eeh_dbgfs_get(data, 0xD10, val); | ||
142 | } | ||
143 | |||
144 | static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val) | ||
145 | { | ||
146 | return ioda_eeh_dbgfs_set(data, 0xD90, val); | ||
147 | } | ||
148 | |||
149 | static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val) | ||
150 | { | ||
151 | return ioda_eeh_dbgfs_get(data, 0xD90, val); | ||
152 | } | ||
153 | |||
154 | static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val) | ||
155 | { | ||
156 | return ioda_eeh_dbgfs_set(data, 0xE10, val); | ||
157 | } | ||
158 | |||
159 | static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val) | ||
160 | { | ||
161 | return ioda_eeh_dbgfs_get(data, 0xE10, val); | ||
162 | } | ||
163 | |||
164 | DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get, | ||
165 | ioda_eeh_outb_dbgfs_set, "0x%llx\n"); | ||
166 | DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get, | ||
167 | ioda_eeh_inbA_dbgfs_set, "0x%llx\n"); | ||
168 | DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get, | ||
169 | ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); | ||
170 | #endif /* CONFIG_DEBUG_FS */ | ||
171 | |||
172 | |||
173 | /** | ||
174 | * ioda_eeh_post_init - Chip dependent post initialization | ||
175 | * @hose: PCI controller | ||
176 | * | ||
177 | * The function will be called after eeh PEs and devices | ||
178 | * have been built. That means the EEH is ready to supply | ||
179 | * service with I/O cache. | ||
180 | */ | ||
181 | static int ioda_eeh_post_init(struct pci_controller *hose) | ||
182 | { | ||
183 | struct pnv_phb *phb = hose->private_data; | ||
184 | int ret; | ||
185 | |||
186 | /* Register OPAL event notifier */ | ||
187 | if (!ioda_eeh_nb_init) { | ||
188 | ret = opal_notifier_register(&ioda_eeh_nb); | ||
189 | if (ret) { | ||
190 | pr_err("%s: Can't register OPAL event notifier (%d)\n", | ||
191 | __func__, ret); | ||
192 | return ret; | ||
193 | } | ||
194 | |||
195 | ioda_eeh_nb_init = 1; | ||
196 | } | ||
197 | |||
198 | #ifdef CONFIG_DEBUG_FS | ||
199 | if (!phb->has_dbgfs && phb->dbgfs) { | ||
200 | phb->has_dbgfs = 1; | ||
201 | |||
202 | debugfs_create_file("err_injct", 0200, | ||
203 | phb->dbgfs, hose, | ||
204 | &ioda_eeh_ei_fops); | ||
205 | |||
206 | debugfs_create_file("err_injct_outbound", 0600, | ||
207 | phb->dbgfs, hose, | ||
208 | &ioda_eeh_outb_dbgfs_ops); | ||
209 | debugfs_create_file("err_injct_inboundA", 0600, | ||
210 | phb->dbgfs, hose, | ||
211 | &ioda_eeh_inbA_dbgfs_ops); | ||
212 | debugfs_create_file("err_injct_inboundB", 0600, | ||
213 | phb->dbgfs, hose, | ||
214 | &ioda_eeh_inbB_dbgfs_ops); | ||
215 | } | ||
216 | #endif | ||
217 | |||
218 | /* If EEH is enabled, we're going to rely on that. | ||
219 | * Otherwise, we restore to conventional mechanism | ||
220 | * to clear frozen PE during PCI config access. | ||
221 | */ | ||
222 | if (eeh_enabled()) | ||
223 | phb->flags |= PNV_PHB_FLAG_EEH; | ||
224 | else | ||
225 | phb->flags &= ~PNV_PHB_FLAG_EEH; | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * ioda_eeh_set_option - Set EEH operation or I/O setting | ||
232 | * @pe: EEH PE | ||
233 | * @option: options | ||
234 | * | ||
235 | * Enable or disable EEH option for the indicated PE. The | ||
236 | * function also can be used to enable I/O or DMA for the | ||
237 | * PE. | ||
238 | */ | ||
239 | static int ioda_eeh_set_option(struct eeh_pe *pe, int option) | ||
240 | { | ||
241 | struct pci_controller *hose = pe->phb; | ||
242 | struct pnv_phb *phb = hose->private_data; | ||
243 | bool freeze_pe = false; | ||
244 | int enable, ret = 0; | ||
245 | s64 rc; | ||
246 | |||
247 | /* Check on PE number */ | ||
248 | if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) { | ||
249 | pr_err("%s: PE address %x out of range [0, %x] " | ||
250 | "on PHB#%x\n", | ||
251 | __func__, pe->addr, phb->ioda.total_pe, | ||
252 | hose->global_number); | ||
253 | return -EINVAL; | ||
254 | } | ||
255 | |||
256 | switch (option) { | ||
257 | case EEH_OPT_DISABLE: | ||
258 | return -EPERM; | ||
259 | case EEH_OPT_ENABLE: | ||
260 | return 0; | ||
261 | case EEH_OPT_THAW_MMIO: | ||
262 | enable = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO; | ||
263 | break; | ||
264 | case EEH_OPT_THAW_DMA: | ||
265 | enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA; | ||
266 | break; | ||
267 | case EEH_OPT_FREEZE_PE: | ||
268 | freeze_pe = true; | ||
269 | enable = OPAL_EEH_ACTION_SET_FREEZE_ALL; | ||
270 | break; | ||
271 | default: | ||
272 | pr_warn("%s: Invalid option %d\n", | ||
273 | __func__, option); | ||
274 | return -EINVAL; | ||
275 | } | ||
276 | |||
277 | /* If PHB supports compound PE, to handle it */ | ||
278 | if (freeze_pe) { | ||
279 | if (phb->freeze_pe) { | ||
280 | phb->freeze_pe(phb, pe->addr); | ||
281 | } else { | ||
282 | rc = opal_pci_eeh_freeze_set(phb->opal_id, | ||
283 | pe->addr, | ||
284 | enable); | ||
285 | if (rc != OPAL_SUCCESS) { | ||
286 | pr_warn("%s: Failure %lld freezing " | ||
287 | "PHB#%x-PE#%x\n", | ||
288 | __func__, rc, | ||
289 | phb->hose->global_number, pe->addr); | ||
290 | ret = -EIO; | ||
291 | } | ||
292 | } | ||
293 | } else { | ||
294 | if (phb->unfreeze_pe) { | ||
295 | ret = phb->unfreeze_pe(phb, pe->addr, enable); | ||
296 | } else { | ||
297 | rc = opal_pci_eeh_freeze_clear(phb->opal_id, | ||
298 | pe->addr, | ||
299 | enable); | ||
300 | if (rc != OPAL_SUCCESS) { | ||
301 | pr_warn("%s: Failure %lld enable %d " | ||
302 | "for PHB#%x-PE#%x\n", | ||
303 | __func__, rc, option, | ||
304 | phb->hose->global_number, pe->addr); | ||
305 | ret = -EIO; | ||
306 | } | ||
307 | } | ||
308 | } | ||
309 | |||
310 | return ret; | ||
311 | } | ||
312 | |||
313 | static void ioda_eeh_phb_diag(struct eeh_pe *pe) | ||
314 | { | ||
315 | struct pnv_phb *phb = pe->phb->private_data; | ||
316 | long rc; | ||
317 | |||
318 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data, | ||
319 | PNV_PCI_DIAG_BUF_SIZE); | ||
320 | if (rc != OPAL_SUCCESS) | ||
321 | pr_warn("%s: Failed to get diag-data for PHB#%x (%ld)\n", | ||
322 | __func__, pe->phb->global_number, rc); | ||
323 | } | ||
324 | |||
325 | static int ioda_eeh_get_phb_state(struct eeh_pe *pe) | ||
326 | { | ||
327 | struct pnv_phb *phb = pe->phb->private_data; | ||
328 | u8 fstate; | ||
329 | __be16 pcierr; | ||
330 | s64 rc; | ||
331 | int result = 0; | ||
332 | |||
333 | rc = opal_pci_eeh_freeze_status(phb->opal_id, | ||
334 | pe->addr, | ||
335 | &fstate, | ||
336 | &pcierr, | ||
337 | NULL); | ||
338 | if (rc != OPAL_SUCCESS) { | ||
339 | pr_warn("%s: Failure %lld getting PHB#%x state\n", | ||
340 | __func__, rc, phb->hose->global_number); | ||
341 | return EEH_STATE_NOT_SUPPORT; | ||
342 | } | ||
343 | |||
344 | /* | ||
345 | * Check PHB state. If the PHB is frozen for the | ||
346 | * first time, to dump the PHB diag-data. | ||
347 | */ | ||
348 | if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) { | ||
349 | result = (EEH_STATE_MMIO_ACTIVE | | ||
350 | EEH_STATE_DMA_ACTIVE | | ||
351 | EEH_STATE_MMIO_ENABLED | | ||
352 | EEH_STATE_DMA_ENABLED); | ||
353 | } else if (!(pe->state & EEH_PE_ISOLATED)) { | ||
354 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
355 | ioda_eeh_phb_diag(pe); | ||
356 | |||
357 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
358 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | ||
359 | } | ||
360 | |||
361 | return result; | ||
362 | } | ||
363 | |||
364 | static int ioda_eeh_get_pe_state(struct eeh_pe *pe) | ||
365 | { | ||
366 | struct pnv_phb *phb = pe->phb->private_data; | ||
367 | u8 fstate; | ||
368 | __be16 pcierr; | ||
369 | s64 rc; | ||
370 | int result; | ||
371 | |||
372 | /* | ||
373 | * We don't clobber hardware frozen state until PE | ||
374 | * reset is completed. In order to keep EEH core | ||
375 | * moving forward, we have to return operational | ||
376 | * state during PE reset. | ||
377 | */ | ||
378 | if (pe->state & EEH_PE_RESET) { | ||
379 | result = (EEH_STATE_MMIO_ACTIVE | | ||
380 | EEH_STATE_DMA_ACTIVE | | ||
381 | EEH_STATE_MMIO_ENABLED | | ||
382 | EEH_STATE_DMA_ENABLED); | ||
383 | return result; | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * Fetch PE state from hardware. If the PHB | ||
388 | * supports compound PE, let it handle that. | ||
389 | */ | ||
390 | if (phb->get_pe_state) { | ||
391 | fstate = phb->get_pe_state(phb, pe->addr); | ||
392 | } else { | ||
393 | rc = opal_pci_eeh_freeze_status(phb->opal_id, | ||
394 | pe->addr, | ||
395 | &fstate, | ||
396 | &pcierr, | ||
397 | NULL); | ||
398 | if (rc != OPAL_SUCCESS) { | ||
399 | pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n", | ||
400 | __func__, rc, phb->hose->global_number, pe->addr); | ||
401 | return EEH_STATE_NOT_SUPPORT; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | /* Figure out state */ | ||
406 | switch (fstate) { | ||
407 | case OPAL_EEH_STOPPED_NOT_FROZEN: | ||
408 | result = (EEH_STATE_MMIO_ACTIVE | | ||
409 | EEH_STATE_DMA_ACTIVE | | ||
410 | EEH_STATE_MMIO_ENABLED | | ||
411 | EEH_STATE_DMA_ENABLED); | ||
412 | break; | ||
413 | case OPAL_EEH_STOPPED_MMIO_FREEZE: | ||
414 | result = (EEH_STATE_DMA_ACTIVE | | ||
415 | EEH_STATE_DMA_ENABLED); | ||
416 | break; | ||
417 | case OPAL_EEH_STOPPED_DMA_FREEZE: | ||
418 | result = (EEH_STATE_MMIO_ACTIVE | | ||
419 | EEH_STATE_MMIO_ENABLED); | ||
420 | break; | ||
421 | case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE: | ||
422 | result = 0; | ||
423 | break; | ||
424 | case OPAL_EEH_STOPPED_RESET: | ||
425 | result = EEH_STATE_RESET_ACTIVE; | ||
426 | break; | ||
427 | case OPAL_EEH_STOPPED_TEMP_UNAVAIL: | ||
428 | result = EEH_STATE_UNAVAILABLE; | ||
429 | break; | ||
430 | case OPAL_EEH_STOPPED_PERM_UNAVAIL: | ||
431 | result = EEH_STATE_NOT_SUPPORT; | ||
432 | break; | ||
433 | default: | ||
434 | result = EEH_STATE_NOT_SUPPORT; | ||
435 | pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n", | ||
436 | __func__, phb->hose->global_number, | ||
437 | pe->addr, fstate); | ||
438 | } | ||
439 | |||
440 | /* | ||
441 | * If PHB supports compound PE, to freeze all | ||
442 | * slave PEs for consistency. | ||
443 | * | ||
444 | * If the PE is switching to frozen state for the | ||
445 | * first time, to dump the PHB diag-data. | ||
446 | */ | ||
447 | if (!(result & EEH_STATE_NOT_SUPPORT) && | ||
448 | !(result & EEH_STATE_UNAVAILABLE) && | ||
449 | !(result & EEH_STATE_MMIO_ACTIVE) && | ||
450 | !(result & EEH_STATE_DMA_ACTIVE) && | ||
451 | !(pe->state & EEH_PE_ISOLATED)) { | ||
452 | if (phb->freeze_pe) | ||
453 | phb->freeze_pe(phb, pe->addr); | ||
454 | |||
455 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
456 | ioda_eeh_phb_diag(pe); | ||
457 | |||
458 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
459 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | ||
460 | } | ||
461 | |||
462 | return result; | ||
463 | } | ||
464 | |||
465 | /** | ||
466 | * ioda_eeh_get_state - Retrieve the state of PE | ||
467 | * @pe: EEH PE | ||
468 | * | ||
469 | * The PE's state should be retrieved from the PEEV, PEST | ||
470 | * IODA tables. Since the OPAL has exported the function | ||
471 | * to do it, it'd better to use that. | ||
472 | */ | ||
473 | static int ioda_eeh_get_state(struct eeh_pe *pe) | ||
474 | { | ||
475 | struct pnv_phb *phb = pe->phb->private_data; | ||
476 | |||
477 | /* Sanity check on PE number. PHB PE should have 0 */ | ||
478 | if (pe->addr < 0 || | ||
479 | pe->addr >= phb->ioda.total_pe) { | ||
480 | pr_warn("%s: PHB#%x-PE#%x out of range [0, %x]\n", | ||
481 | __func__, phb->hose->global_number, | ||
482 | pe->addr, phb->ioda.total_pe); | ||
483 | return EEH_STATE_NOT_SUPPORT; | ||
484 | } | ||
485 | |||
486 | if (pe->type & EEH_PE_PHB) | ||
487 | return ioda_eeh_get_phb_state(pe); | ||
488 | |||
489 | return ioda_eeh_get_pe_state(pe); | ||
490 | } | ||
491 | |||
492 | static s64 ioda_eeh_phb_poll(struct pnv_phb *phb) | ||
493 | { | ||
494 | s64 rc = OPAL_HARDWARE; | ||
495 | |||
496 | while (1) { | ||
497 | rc = opal_pci_poll(phb->opal_id); | ||
498 | if (rc <= 0) | ||
499 | break; | ||
500 | |||
501 | if (system_state < SYSTEM_RUNNING) | ||
502 | udelay(1000 * rc); | ||
503 | else | ||
504 | msleep(rc); | ||
505 | } | ||
506 | |||
507 | return rc; | ||
508 | } | ||
509 | |||
510 | int ioda_eeh_phb_reset(struct pci_controller *hose, int option) | ||
511 | { | ||
512 | struct pnv_phb *phb = hose->private_data; | ||
513 | s64 rc = OPAL_HARDWARE; | ||
514 | |||
515 | pr_debug("%s: Reset PHB#%x, option=%d\n", | ||
516 | __func__, hose->global_number, option); | ||
517 | |||
518 | /* Issue PHB complete reset request */ | ||
519 | if (option == EEH_RESET_FUNDAMENTAL || | ||
520 | option == EEH_RESET_HOT) | ||
521 | rc = opal_pci_reset(phb->opal_id, | ||
522 | OPAL_RESET_PHB_COMPLETE, | ||
523 | OPAL_ASSERT_RESET); | ||
524 | else if (option == EEH_RESET_DEACTIVATE) | ||
525 | rc = opal_pci_reset(phb->opal_id, | ||
526 | OPAL_RESET_PHB_COMPLETE, | ||
527 | OPAL_DEASSERT_RESET); | ||
528 | if (rc < 0) | ||
529 | goto out; | ||
530 | |||
531 | /* | ||
532 | * Poll state of the PHB until the request is done | ||
533 | * successfully. The PHB reset is usually PHB complete | ||
534 | * reset followed by hot reset on root bus. So we also | ||
535 | * need the PCI bus settlement delay. | ||
536 | */ | ||
537 | rc = ioda_eeh_phb_poll(phb); | ||
538 | if (option == EEH_RESET_DEACTIVATE) { | ||
539 | if (system_state < SYSTEM_RUNNING) | ||
540 | udelay(1000 * EEH_PE_RST_SETTLE_TIME); | ||
541 | else | ||
542 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
543 | } | ||
544 | out: | ||
545 | if (rc != OPAL_SUCCESS) | ||
546 | return -EIO; | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static int ioda_eeh_root_reset(struct pci_controller *hose, int option) | ||
552 | { | ||
553 | struct pnv_phb *phb = hose->private_data; | ||
554 | s64 rc = OPAL_SUCCESS; | ||
555 | |||
556 | pr_debug("%s: Reset PHB#%x, option=%d\n", | ||
557 | __func__, hose->global_number, option); | ||
558 | |||
559 | /* | ||
560 | * During the reset deassert time, we needn't care | ||
561 | * the reset scope because the firmware does nothing | ||
562 | * for fundamental or hot reset during deassert phase. | ||
563 | */ | ||
564 | if (option == EEH_RESET_FUNDAMENTAL) | ||
565 | rc = opal_pci_reset(phb->opal_id, | ||
566 | OPAL_RESET_PCI_FUNDAMENTAL, | ||
567 | OPAL_ASSERT_RESET); | ||
568 | else if (option == EEH_RESET_HOT) | ||
569 | rc = opal_pci_reset(phb->opal_id, | ||
570 | OPAL_RESET_PCI_HOT, | ||
571 | OPAL_ASSERT_RESET); | ||
572 | else if (option == EEH_RESET_DEACTIVATE) | ||
573 | rc = opal_pci_reset(phb->opal_id, | ||
574 | OPAL_RESET_PCI_HOT, | ||
575 | OPAL_DEASSERT_RESET); | ||
576 | if (rc < 0) | ||
577 | goto out; | ||
578 | |||
579 | /* Poll state of the PHB until the request is done */ | ||
580 | rc = ioda_eeh_phb_poll(phb); | ||
581 | if (option == EEH_RESET_DEACTIVATE) | ||
582 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
583 | out: | ||
584 | if (rc != OPAL_SUCCESS) | ||
585 | return -EIO; | ||
586 | |||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option) | ||
591 | |||
592 | { | ||
593 | struct device_node *dn = pci_device_to_OF_node(dev); | ||
594 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | ||
595 | int aer = edev ? edev->aer_cap : 0; | ||
596 | u32 ctrl; | ||
597 | |||
598 | pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n", | ||
599 | __func__, pci_domain_nr(dev->bus), | ||
600 | dev->bus->number, option); | ||
601 | |||
602 | switch (option) { | ||
603 | case EEH_RESET_FUNDAMENTAL: | ||
604 | case EEH_RESET_HOT: | ||
605 | /* Don't report linkDown event */ | ||
606 | if (aer) { | ||
607 | eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK, | ||
608 | 4, &ctrl); | ||
609 | ctrl |= PCI_ERR_UNC_SURPDN; | ||
610 | eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK, | ||
611 | 4, ctrl); | ||
612 | } | ||
613 | |||
614 | eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl); | ||
615 | ctrl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
616 | eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl); | ||
617 | msleep(EEH_PE_RST_HOLD_TIME); | ||
618 | |||
619 | break; | ||
620 | case EEH_RESET_DEACTIVATE: | ||
621 | eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl); | ||
622 | ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
623 | eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl); | ||
624 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
625 | |||
626 | /* Continue reporting linkDown event */ | ||
627 | if (aer) { | ||
628 | eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK, | ||
629 | 4, &ctrl); | ||
630 | ctrl &= ~PCI_ERR_UNC_SURPDN; | ||
631 | eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK, | ||
632 | 4, ctrl); | ||
633 | } | ||
634 | |||
635 | break; | ||
636 | } | ||
637 | |||
638 | return 0; | ||
639 | } | ||
640 | |||
641 | void pnv_pci_reset_secondary_bus(struct pci_dev *dev) | ||
642 | { | ||
643 | struct pci_controller *hose; | ||
644 | |||
645 | if (pci_is_root_bus(dev->bus)) { | ||
646 | hose = pci_bus_to_host(dev->bus); | ||
647 | ioda_eeh_root_reset(hose, EEH_RESET_HOT); | ||
648 | ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE); | ||
649 | } else { | ||
650 | ioda_eeh_bridge_reset(dev, EEH_RESET_HOT); | ||
651 | ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE); | ||
652 | } | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * ioda_eeh_reset - Reset the indicated PE | ||
657 | * @pe: EEH PE | ||
658 | * @option: reset option | ||
659 | * | ||
660 | * Do reset on the indicated PE. For PCI bus sensitive PE, | ||
661 | * we need to reset the parent p2p bridge. The PHB has to | ||
662 | * be reinitialized if the p2p bridge is root bridge. For | ||
663 | * PCI device sensitive PE, we will try to reset the device | ||
664 | * through FLR. For now, we don't have OPAL APIs to do HARD | ||
665 | * reset yet, so all reset would be SOFT (HOT) reset. | ||
666 | */ | ||
667 | static int ioda_eeh_reset(struct eeh_pe *pe, int option) | ||
668 | { | ||
669 | struct pci_controller *hose = pe->phb; | ||
670 | struct pci_bus *bus; | ||
671 | int ret; | ||
672 | |||
673 | /* | ||
674 | * For PHB reset, we always have complete reset. For those PEs whose | ||
675 | * primary bus derived from root complex (root bus) or root port | ||
676 | * (usually bus#1), we apply hot or fundamental reset on the root port. | ||
677 | * For other PEs, we always have hot reset on the PE primary bus. | ||
678 | * | ||
679 | * Here, we have different design to pHyp, which always clear the | ||
680 | * frozen state during PE reset. However, the good idea here from | ||
681 | * benh is to keep frozen state before we get PE reset done completely | ||
682 | * (until BAR restore). With the frozen state, HW drops illegal IO | ||
683 | * or MMIO access, which can incur recrusive frozen PE during PE | ||
684 | * reset. The side effect is that EEH core has to clear the frozen | ||
685 | * state explicitly after BAR restore. | ||
686 | */ | ||
687 | if (pe->type & EEH_PE_PHB) { | ||
688 | ret = ioda_eeh_phb_reset(hose, option); | ||
689 | } else { | ||
690 | struct pnv_phb *phb; | ||
691 | s64 rc; | ||
692 | |||
693 | /* | ||
694 | * The frozen PE might be caused by PAPR error injection | ||
695 | * registers, which are expected to be cleared after hitting | ||
696 | * frozen PE as stated in the hardware spec. Unfortunately, | ||
697 | * that's not true on P7IOC. So we have to clear it manually | ||
698 | * to avoid recursive EEH errors during recovery. | ||
699 | */ | ||
700 | phb = hose->private_data; | ||
701 | if (phb->model == PNV_PHB_MODEL_P7IOC && | ||
702 | (option == EEH_RESET_HOT || | ||
703 | option == EEH_RESET_FUNDAMENTAL)) { | ||
704 | rc = opal_pci_reset(phb->opal_id, | ||
705 | OPAL_RESET_PHB_ERROR, | ||
706 | OPAL_ASSERT_RESET); | ||
707 | if (rc != OPAL_SUCCESS) { | ||
708 | pr_warn("%s: Failure %lld clearing " | ||
709 | "error injection registers\n", | ||
710 | __func__, rc); | ||
711 | return -EIO; | ||
712 | } | ||
713 | } | ||
714 | |||
715 | bus = eeh_pe_bus_get(pe); | ||
716 | if (pci_is_root_bus(bus) || | ||
717 | pci_is_root_bus(bus->parent)) | ||
718 | ret = ioda_eeh_root_reset(hose, option); | ||
719 | else | ||
720 | ret = ioda_eeh_bridge_reset(bus->self, option); | ||
721 | } | ||
722 | |||
723 | return ret; | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * ioda_eeh_get_log - Retrieve error log | ||
728 | * @pe: frozen PE | ||
729 | * @severity: permanent or temporary error | ||
730 | * @drv_log: device driver log | ||
731 | * @len: length of device driver log | ||
732 | * | ||
733 | * Retrieve error log, which contains log from device driver | ||
734 | * and firmware. | ||
735 | */ | ||
736 | static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, | ||
737 | char *drv_log, unsigned long len) | ||
738 | { | ||
739 | if (!eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
740 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | ||
741 | |||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | /** | ||
746 | * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE | ||
747 | * @pe: EEH PE | ||
748 | * | ||
749 | * For particular PE, it might have included PCI bridges. In order | ||
750 | * to make the PE work properly, those PCI bridges should be configured | ||
751 | * correctly. However, we need do nothing on P7IOC since the reset | ||
752 | * function will do everything that should be covered by the function. | ||
753 | */ | ||
754 | static int ioda_eeh_configure_bridge(struct eeh_pe *pe) | ||
755 | { | ||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | static int ioda_eeh_err_inject(struct eeh_pe *pe, int type, int func, | ||
760 | unsigned long addr, unsigned long mask) | ||
761 | { | ||
762 | struct pci_controller *hose = pe->phb; | ||
763 | struct pnv_phb *phb = hose->private_data; | ||
764 | s64 ret; | ||
765 | |||
766 | /* Sanity check on error type */ | ||
767 | if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR && | ||
768 | type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) { | ||
769 | pr_warn("%s: Invalid error type %d\n", | ||
770 | __func__, type); | ||
771 | return -ERANGE; | ||
772 | } | ||
773 | |||
774 | if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR || | ||
775 | func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) { | ||
776 | pr_warn("%s: Invalid error function %d\n", | ||
777 | __func__, func); | ||
778 | return -ERANGE; | ||
779 | } | ||
780 | |||
781 | /* Firmware supports error injection ? */ | ||
782 | if (!opal_check_token(OPAL_PCI_ERR_INJECT)) { | ||
783 | pr_warn("%s: Firmware doesn't support error injection\n", | ||
784 | __func__); | ||
785 | return -ENXIO; | ||
786 | } | ||
787 | |||
788 | /* Do error injection */ | ||
789 | ret = opal_pci_err_inject(phb->opal_id, pe->addr, | ||
790 | type, func, addr, mask); | ||
791 | if (ret != OPAL_SUCCESS) { | ||
792 | pr_warn("%s: Failure %lld injecting error " | ||
793 | "%d-%d to PHB#%x-PE#%x\n", | ||
794 | __func__, ret, type, func, | ||
795 | hose->global_number, pe->addr); | ||
796 | return -EIO; | ||
797 | } | ||
798 | |||
799 | return 0; | ||
800 | } | ||
801 | |||
802 | static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) | ||
803 | { | ||
804 | /* GEM */ | ||
805 | if (data->gemXfir || data->gemRfir || | ||
806 | data->gemRirqfir || data->gemMask || data->gemRwof) | ||
807 | pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n", | ||
808 | be64_to_cpu(data->gemXfir), | ||
809 | be64_to_cpu(data->gemRfir), | ||
810 | be64_to_cpu(data->gemRirqfir), | ||
811 | be64_to_cpu(data->gemMask), | ||
812 | be64_to_cpu(data->gemRwof)); | ||
813 | |||
814 | /* LEM */ | ||
815 | if (data->lemFir || data->lemErrMask || | ||
816 | data->lemAction0 || data->lemAction1 || data->lemWof) | ||
817 | pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n", | ||
818 | be64_to_cpu(data->lemFir), | ||
819 | be64_to_cpu(data->lemErrMask), | ||
820 | be64_to_cpu(data->lemAction0), | ||
821 | be64_to_cpu(data->lemAction1), | ||
822 | be64_to_cpu(data->lemWof)); | ||
823 | } | ||
824 | |||
825 | static void ioda_eeh_hub_diag(struct pci_controller *hose) | ||
826 | { | ||
827 | struct pnv_phb *phb = hose->private_data; | ||
828 | struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; | ||
829 | long rc; | ||
830 | |||
831 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); | ||
832 | if (rc != OPAL_SUCCESS) { | ||
833 | pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n", | ||
834 | __func__, phb->hub_id, rc); | ||
835 | return; | ||
836 | } | ||
837 | |||
838 | switch (data->type) { | ||
839 | case OPAL_P7IOC_DIAG_TYPE_RGC: | ||
840 | pr_info("P7IOC diag-data for RGC\n\n"); | ||
841 | ioda_eeh_hub_diag_common(data); | ||
842 | if (data->rgc.rgcStatus || data->rgc.rgcLdcp) | ||
843 | pr_info(" RGC: %016llx %016llx\n", | ||
844 | be64_to_cpu(data->rgc.rgcStatus), | ||
845 | be64_to_cpu(data->rgc.rgcLdcp)); | ||
846 | break; | ||
847 | case OPAL_P7IOC_DIAG_TYPE_BI: | ||
848 | pr_info("P7IOC diag-data for BI %s\n\n", | ||
849 | data->bi.biDownbound ? "Downbound" : "Upbound"); | ||
850 | ioda_eeh_hub_diag_common(data); | ||
851 | if (data->bi.biLdcp0 || data->bi.biLdcp1 || | ||
852 | data->bi.biLdcp2 || data->bi.biFenceStatus) | ||
853 | pr_info(" BI: %016llx %016llx %016llx %016llx\n", | ||
854 | be64_to_cpu(data->bi.biLdcp0), | ||
855 | be64_to_cpu(data->bi.biLdcp1), | ||
856 | be64_to_cpu(data->bi.biLdcp2), | ||
857 | be64_to_cpu(data->bi.biFenceStatus)); | ||
858 | break; | ||
859 | case OPAL_P7IOC_DIAG_TYPE_CI: | ||
860 | pr_info("P7IOC diag-data for CI Port %d\n\n", | ||
861 | data->ci.ciPort); | ||
862 | ioda_eeh_hub_diag_common(data); | ||
863 | if (data->ci.ciPortStatus || data->ci.ciPortLdcp) | ||
864 | pr_info(" CI: %016llx %016llx\n", | ||
865 | be64_to_cpu(data->ci.ciPortStatus), | ||
866 | be64_to_cpu(data->ci.ciPortLdcp)); | ||
867 | break; | ||
868 | case OPAL_P7IOC_DIAG_TYPE_MISC: | ||
869 | pr_info("P7IOC diag-data for MISC\n\n"); | ||
870 | ioda_eeh_hub_diag_common(data); | ||
871 | break; | ||
872 | case OPAL_P7IOC_DIAG_TYPE_I2C: | ||
873 | pr_info("P7IOC diag-data for I2C\n\n"); | ||
874 | ioda_eeh_hub_diag_common(data); | ||
875 | break; | ||
876 | default: | ||
877 | pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n", | ||
878 | __func__, phb->hub_id, data->type); | ||
879 | } | ||
880 | } | ||
881 | |||
/*
 * Map a PE number reported by firmware to the EEH PE the core knows
 * about, freezing it (and any non-active ancestors) along the way.
 *
 * @hose: PHB the PE number belongs to
 * @pe_no: PE number reported by firmware (CPU endian)
 * @pe: output - set to the topmost frozen EEH PE found
 *
 * Returns 0 on success, -EEXIST when no EEH PE matches @pe_no.
 * NOTE(review): -EEXIST for "not found" reads oddly (-ENODEV would be
 * the usual choice); the visible caller only tests for non-zero, so
 * the exact value is not relied upon here.
 */
static int ioda_eeh_get_pe(struct pci_controller *hose,
			   u16 pe_no, struct eeh_pe **pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pnv_pe;
	struct eeh_pe *dev_pe;
	struct eeh_dev edev;

	/*
	 * If PHB supports compound PE, to fetch
	 * the master PE because slave PE is invisible
	 * to EEH core.
	 */
	pnv_pe = &phb->ioda.pe_array[pe_no];
	if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
		pnv_pe = pnv_pe->master;
		WARN_ON(!pnv_pe ||
			!(pnv_pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pnv_pe->pe_number;
	}

	/*
	 * Find the PE according to PE#.  A throw-away eeh_dev on the
	 * stack carries just the (phb, config addr) key eeh_pe_get()
	 * uses for the lookup.
	 */
	memset(&edev, 0, sizeof(struct eeh_dev));
	edev.phb = hose;
	edev.pe_config_addr = pe_no;
	dev_pe = eeh_pe_get(&edev);
	if (!dev_pe)
		return -EEXIST;

	/* Freeze the (compound) PE, unless it is already isolated */
	*pe = dev_pe;
	if (!(dev_pe->state & EEH_PE_ISOLATED))
		phb->freeze_pe(phb, pe_no);

	/*
	 * At this point, we're sure the (compound) PE should
	 * have been frozen. However, we still need poke until
	 * hitting the frozen PE on top level: walk the ancestors,
	 * freeze every one whose MMIO/DMA are not both active, and
	 * report the topmost such PE through *pe.
	 */
	dev_pe = dev_pe->parent;
	while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
		int ret;
		int active_flags = (EEH_STATE_MMIO_ACTIVE |
				    EEH_STATE_DMA_ACTIVE);

		/* Healthy (or unreadable) ancestor: keep climbing */
		ret = eeh_ops->get_state(dev_pe, NULL);
		if (ret <= 0 || (ret & active_flags) == active_flags) {
			dev_pe = dev_pe->parent;
			continue;
		}

		/* Frozen parent PE */
		*pe = dev_pe;
		if (!(dev_pe->state & EEH_PE_ISOLATED))
			phb->freeze_pe(phb, dev_pe->addr);

		/* Next one */
		dev_pe = dev_pe->parent;
	}

	return 0;
}
944 | |||
945 | /** | ||
946 | * ioda_eeh_next_error - Retrieve next error for EEH core to handle | ||
947 | * @pe: The affected PE | ||
948 | * | ||
949 | * The function is expected to be called by EEH core while it gets | ||
950 | * special EEH event (without binding PE). The function calls to | ||
951 | * OPAL APIs for next error to handle. The informational error is | ||
952 | * handled internally by platform. However, the dead IOC, dead PHB, | ||
953 | * fenced PHB and frozen PE should be handled by EEH core eventually. | ||
954 | */ | ||
955 | static int ioda_eeh_next_error(struct eeh_pe **pe) | ||
956 | { | ||
957 | struct pci_controller *hose; | ||
958 | struct pnv_phb *phb; | ||
959 | struct eeh_pe *phb_pe, *parent_pe; | ||
960 | __be64 frozen_pe_no; | ||
961 | __be16 err_type, severity; | ||
962 | int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); | ||
963 | long rc; | ||
964 | int state, ret = EEH_NEXT_ERR_NONE; | ||
965 | |||
966 | /* | ||
967 | * While running here, it's safe to purge the event queue. | ||
968 | * And we should keep the cached OPAL notifier event sychronized | ||
969 | * between the kernel and firmware. | ||
970 | */ | ||
971 | eeh_remove_event(NULL, false); | ||
972 | opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); | ||
973 | |||
974 | list_for_each_entry(hose, &hose_list, list_node) { | ||
975 | /* | ||
976 | * If the subordinate PCI buses of the PHB has been | ||
977 | * removed or is exactly under error recovery, we | ||
978 | * needn't take care of it any more. | ||
979 | */ | ||
980 | phb = hose->private_data; | ||
981 | phb_pe = eeh_phb_pe_get(hose); | ||
982 | if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED)) | ||
983 | continue; | ||
984 | |||
985 | rc = opal_pci_next_error(phb->opal_id, | ||
986 | &frozen_pe_no, &err_type, &severity); | ||
987 | |||
988 | /* If OPAL API returns error, we needn't proceed */ | ||
989 | if (rc != OPAL_SUCCESS) { | ||
990 | pr_devel("%s: Invalid return value on " | ||
991 | "PHB#%x (0x%lx) from opal_pci_next_error", | ||
992 | __func__, hose->global_number, rc); | ||
993 | continue; | ||
994 | } | ||
995 | |||
996 | /* If the PHB doesn't have error, stop processing */ | ||
997 | if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR || | ||
998 | be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) { | ||
999 | pr_devel("%s: No error found on PHB#%x\n", | ||
1000 | __func__, hose->global_number); | ||
1001 | continue; | ||
1002 | } | ||
1003 | |||
1004 | /* | ||
1005 | * Processing the error. We're expecting the error with | ||
1006 | * highest priority reported upon multiple errors on the | ||
1007 | * specific PHB. | ||
1008 | */ | ||
1009 | pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", | ||
1010 | __func__, be16_to_cpu(err_type), be16_to_cpu(severity), | ||
1011 | be64_to_cpu(frozen_pe_no), hose->global_number); | ||
1012 | switch (be16_to_cpu(err_type)) { | ||
1013 | case OPAL_EEH_IOC_ERROR: | ||
1014 | if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) { | ||
1015 | pr_err("EEH: dead IOC detected\n"); | ||
1016 | ret = EEH_NEXT_ERR_DEAD_IOC; | ||
1017 | } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { | ||
1018 | pr_info("EEH: IOC informative error " | ||
1019 | "detected\n"); | ||
1020 | ioda_eeh_hub_diag(hose); | ||
1021 | ret = EEH_NEXT_ERR_NONE; | ||
1022 | } | ||
1023 | |||
1024 | break; | ||
1025 | case OPAL_EEH_PHB_ERROR: | ||
1026 | if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) { | ||
1027 | *pe = phb_pe; | ||
1028 | pr_err("EEH: dead PHB#%x detected, " | ||
1029 | "location: %s\n", | ||
1030 | hose->global_number, | ||
1031 | eeh_pe_loc_get(phb_pe)); | ||
1032 | ret = EEH_NEXT_ERR_DEAD_PHB; | ||
1033 | } else if (be16_to_cpu(severity) == | ||
1034 | OPAL_EEH_SEV_PHB_FENCED) { | ||
1035 | *pe = phb_pe; | ||
1036 | pr_err("EEH: Fenced PHB#%x detected, " | ||
1037 | "location: %s\n", | ||
1038 | hose->global_number, | ||
1039 | eeh_pe_loc_get(phb_pe)); | ||
1040 | ret = EEH_NEXT_ERR_FENCED_PHB; | ||
1041 | } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { | ||
1042 | pr_info("EEH: PHB#%x informative error " | ||
1043 | "detected, location: %s\n", | ||
1044 | hose->global_number, | ||
1045 | eeh_pe_loc_get(phb_pe)); | ||
1046 | ioda_eeh_phb_diag(phb_pe); | ||
1047 | pnv_pci_dump_phb_diag_data(hose, phb_pe->data); | ||
1048 | ret = EEH_NEXT_ERR_NONE; | ||
1049 | } | ||
1050 | |||
1051 | break; | ||
1052 | case OPAL_EEH_PE_ERROR: | ||
1053 | /* | ||
1054 | * If we can't find the corresponding PE, we | ||
1055 | * just try to unfreeze. | ||
1056 | */ | ||
1057 | if (ioda_eeh_get_pe(hose, | ||
1058 | be64_to_cpu(frozen_pe_no), pe)) { | ||
1059 | /* Try best to clear it */ | ||
1060 | pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n", | ||
1061 | hose->global_number, frozen_pe_no); | ||
1062 | pr_info("EEH: PHB location: %s\n", | ||
1063 | eeh_pe_loc_get(phb_pe)); | ||
1064 | opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no, | ||
1065 | OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); | ||
1066 | ret = EEH_NEXT_ERR_NONE; | ||
1067 | } else if ((*pe)->state & EEH_PE_ISOLATED || | ||
1068 | eeh_pe_passed(*pe)) { | ||
1069 | ret = EEH_NEXT_ERR_NONE; | ||
1070 | } else { | ||
1071 | pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", | ||
1072 | (*pe)->addr, (*pe)->phb->global_number); | ||
1073 | pr_err("EEH: PE location: %s, PHB location: %s\n", | ||
1074 | eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe)); | ||
1075 | ret = EEH_NEXT_ERR_FROZEN_PE; | ||
1076 | } | ||
1077 | |||
1078 | break; | ||
1079 | default: | ||
1080 | pr_warn("%s: Unexpected error type %d\n", | ||
1081 | __func__, be16_to_cpu(err_type)); | ||
1082 | } | ||
1083 | |||
1084 | /* | ||
1085 | * EEH core will try recover from fenced PHB or | ||
1086 | * frozen PE. In the time for frozen PE, EEH core | ||
1087 | * enable IO path for that before collecting logs, | ||
1088 | * but it ruins the site. So we have to dump the | ||
1089 | * log in advance here. | ||
1090 | */ | ||
1091 | if ((ret == EEH_NEXT_ERR_FROZEN_PE || | ||
1092 | ret == EEH_NEXT_ERR_FENCED_PHB) && | ||
1093 | !((*pe)->state & EEH_PE_ISOLATED)) { | ||
1094 | eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); | ||
1095 | ioda_eeh_phb_diag(*pe); | ||
1096 | |||
1097 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
1098 | pnv_pci_dump_phb_diag_data((*pe)->phb, | ||
1099 | (*pe)->data); | ||
1100 | } | ||
1101 | |||
1102 | /* | ||
1103 | * We probably have the frozen parent PE out there and | ||
1104 | * we need have to handle frozen parent PE firstly. | ||
1105 | */ | ||
1106 | if (ret == EEH_NEXT_ERR_FROZEN_PE) { | ||
1107 | parent_pe = (*pe)->parent; | ||
1108 | while (parent_pe) { | ||
1109 | /* Hit the ceiling ? */ | ||
1110 | if (parent_pe->type & EEH_PE_PHB) | ||
1111 | break; | ||
1112 | |||
1113 | /* Frozen parent PE ? */ | ||
1114 | state = ioda_eeh_get_state(parent_pe); | ||
1115 | if (state > 0 && | ||
1116 | (state & active_flags) != active_flags) | ||
1117 | *pe = parent_pe; | ||
1118 | |||
1119 | /* Next parent level */ | ||
1120 | parent_pe = parent_pe->parent; | ||
1121 | } | ||
1122 | |||
1123 | /* We possibly migrate to another PE */ | ||
1124 | eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); | ||
1125 | } | ||
1126 | |||
1127 | /* | ||
1128 | * If we have no errors on the specific PHB or only | ||
1129 | * informative error there, we continue poking it. | ||
1130 | * Otherwise, we need actions to be taken by upper | ||
1131 | * layer. | ||
1132 | */ | ||
1133 | if (ret > EEH_NEXT_ERR_INF) | ||
1134 | break; | ||
1135 | } | ||
1136 | |||
1137 | return ret; | ||
1138 | } | ||
1139 | |||
/*
 * EEH backend callbacks for IODA-based PHBs; the generic powernv
 * EEH layer dispatches through this table via phb->eeh_ops.
 */
struct pnv_eeh_ops ioda_eeh_ops = {
	.post_init = ioda_eeh_post_init,
	.set_option = ioda_eeh_set_option,
	.get_state = ioda_eeh_get_state,
	.reset = ioda_eeh_reset,
	.get_log = ioda_eeh_get_log,
	.configure_bridge = ioda_eeh_configure_bridge,
	.err_inject = ioda_eeh_err_inject,
	.next_error = ioda_eeh_next_error
};
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index e261869adc86..ce738ab3d5a9 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/atomic.h> | 14 | #include <linux/atomic.h> |
15 | #include <linux/debugfs.h> | ||
15 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
16 | #include <linux/export.h> | 17 | #include <linux/export.h> |
17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
@@ -38,12 +39,14 @@ | |||
38 | #include "powernv.h" | 39 | #include "powernv.h" |
39 | #include "pci.h" | 40 | #include "pci.h" |
40 | 41 | ||
42 | static bool pnv_eeh_nb_init = false; | ||
43 | |||
41 | /** | 44 | /** |
42 | * powernv_eeh_init - EEH platform dependent initialization | 45 | * pnv_eeh_init - EEH platform dependent initialization |
43 | * | 46 | * |
44 | * EEH platform dependent initialization on powernv | 47 | * EEH platform dependent initialization on powernv |
45 | */ | 48 | */ |
46 | static int powernv_eeh_init(void) | 49 | static int pnv_eeh_init(void) |
47 | { | 50 | { |
48 | struct pci_controller *hose; | 51 | struct pci_controller *hose; |
49 | struct pnv_phb *phb; | 52 | struct pnv_phb *phb; |
@@ -85,37 +88,280 @@ static int powernv_eeh_init(void) | |||
85 | return 0; | 88 | return 0; |
86 | } | 89 | } |
87 | 90 | ||
91 | static int pnv_eeh_event(struct notifier_block *nb, | ||
92 | unsigned long events, void *change) | ||
93 | { | ||
94 | uint64_t changed_evts = (uint64_t)change; | ||
95 | |||
96 | /* | ||
97 | * We simply send special EEH event if EEH has | ||
98 | * been enabled, or clear pending events in | ||
99 | * case that we enable EEH soon | ||
100 | */ | ||
101 | if (!(changed_evts & OPAL_EVENT_PCI_ERROR) || | ||
102 | !(events & OPAL_EVENT_PCI_ERROR)) | ||
103 | return 0; | ||
104 | |||
105 | if (eeh_enabled()) | ||
106 | eeh_send_failure_event(NULL); | ||
107 | else | ||
108 | opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static struct notifier_block pnv_eeh_nb = { | ||
114 | .notifier_call = pnv_eeh_event, | ||
115 | .next = NULL, | ||
116 | .priority = 0 | ||
117 | }; | ||
118 | |||
119 | #ifdef CONFIG_DEBUG_FS | ||
120 | static ssize_t pnv_eeh_ei_write(struct file *filp, | ||
121 | const char __user *user_buf, | ||
122 | size_t count, loff_t *ppos) | ||
123 | { | ||
124 | struct pci_controller *hose = filp->private_data; | ||
125 | struct eeh_dev *edev; | ||
126 | struct eeh_pe *pe; | ||
127 | int pe_no, type, func; | ||
128 | unsigned long addr, mask; | ||
129 | char buf[50]; | ||
130 | int ret; | ||
131 | |||
132 | if (!eeh_ops || !eeh_ops->err_inject) | ||
133 | return -ENXIO; | ||
134 | |||
135 | /* Copy over argument buffer */ | ||
136 | ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); | ||
137 | if (!ret) | ||
138 | return -EFAULT; | ||
139 | |||
140 | /* Retrieve parameters */ | ||
141 | ret = sscanf(buf, "%x:%x:%x:%lx:%lx", | ||
142 | &pe_no, &type, &func, &addr, &mask); | ||
143 | if (ret != 5) | ||
144 | return -EINVAL; | ||
145 | |||
146 | /* Retrieve PE */ | ||
147 | edev = kzalloc(sizeof(*edev), GFP_KERNEL); | ||
148 | if (!edev) | ||
149 | return -ENOMEM; | ||
150 | edev->phb = hose; | ||
151 | edev->pe_config_addr = pe_no; | ||
152 | pe = eeh_pe_get(edev); | ||
153 | kfree(edev); | ||
154 | if (!pe) | ||
155 | return -ENODEV; | ||
156 | |||
157 | /* Do error injection */ | ||
158 | ret = eeh_ops->err_inject(pe, type, func, addr, mask); | ||
159 | return ret < 0 ? ret : count; | ||
160 | } | ||
161 | |||
162 | static const struct file_operations pnv_eeh_ei_fops = { | ||
163 | .open = simple_open, | ||
164 | .llseek = no_llseek, | ||
165 | .write = pnv_eeh_ei_write, | ||
166 | }; | ||
167 | |||
168 | static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val) | ||
169 | { | ||
170 | struct pci_controller *hose = data; | ||
171 | struct pnv_phb *phb = hose->private_data; | ||
172 | |||
173 | out_be64(phb->regs + offset, val); | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val) | ||
178 | { | ||
179 | struct pci_controller *hose = data; | ||
180 | struct pnv_phb *phb = hose->private_data; | ||
181 | |||
182 | *val = in_be64(phb->regs + offset); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static int pnv_eeh_outb_dbgfs_set(void *data, u64 val) | ||
187 | { | ||
188 | return pnv_eeh_dbgfs_set(data, 0xD10, val); | ||
189 | } | ||
190 | |||
191 | static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val) | ||
192 | { | ||
193 | return pnv_eeh_dbgfs_get(data, 0xD10, val); | ||
194 | } | ||
195 | |||
196 | static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val) | ||
197 | { | ||
198 | return pnv_eeh_dbgfs_set(data, 0xD90, val); | ||
199 | } | ||
200 | |||
201 | static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val) | ||
202 | { | ||
203 | return pnv_eeh_dbgfs_get(data, 0xD90, val); | ||
204 | } | ||
205 | |||
206 | static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val) | ||
207 | { | ||
208 | return pnv_eeh_dbgfs_set(data, 0xE10, val); | ||
209 | } | ||
210 | |||
211 | static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val) | ||
212 | { | ||
213 | return pnv_eeh_dbgfs_get(data, 0xE10, val); | ||
214 | } | ||
215 | |||
216 | DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get, | ||
217 | pnv_eeh_outb_dbgfs_set, "0x%llx\n"); | ||
218 | DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get, | ||
219 | pnv_eeh_inbA_dbgfs_set, "0x%llx\n"); | ||
220 | DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get, | ||
221 | pnv_eeh_inbB_dbgfs_set, "0x%llx\n"); | ||
222 | #endif /* CONFIG_DEBUG_FS */ | ||
223 | |||
88 | /** | 224 | /** |
89 | * powernv_eeh_post_init - EEH platform dependent post initialization | 225 | * pnv_eeh_post_init - EEH platform dependent post initialization |
90 | * | 226 | * |
91 | * EEH platform dependent post initialization on powernv. When | 227 | * EEH platform dependent post initialization on powernv. When |
92 | * the function is called, the EEH PEs and devices should have | 228 | * the function is called, the EEH PEs and devices should have |
93 | * been built. If the I/O cache staff has been built, EEH is | 229 | * been built. If the I/O cache staff has been built, EEH is |
94 | * ready to supply service. | 230 | * ready to supply service. |
95 | */ | 231 | */ |
96 | static int powernv_eeh_post_init(void) | 232 | static int pnv_eeh_post_init(void) |
97 | { | 233 | { |
98 | struct pci_controller *hose; | 234 | struct pci_controller *hose; |
99 | struct pnv_phb *phb; | 235 | struct pnv_phb *phb; |
100 | int ret = 0; | 236 | int ret = 0; |
101 | 237 | ||
238 | /* Register OPAL event notifier */ | ||
239 | if (!pnv_eeh_nb_init) { | ||
240 | ret = opal_notifier_register(&pnv_eeh_nb); | ||
241 | if (ret) { | ||
242 | pr_warn("%s: Can't register OPAL event notifier (%d)\n", | ||
243 | __func__, ret); | ||
244 | return ret; | ||
245 | } | ||
246 | |||
247 | pnv_eeh_nb_init = true; | ||
248 | } | ||
249 | |||
102 | list_for_each_entry(hose, &hose_list, list_node) { | 250 | list_for_each_entry(hose, &hose_list, list_node) { |
103 | phb = hose->private_data; | 251 | phb = hose->private_data; |
104 | 252 | ||
105 | if (phb->eeh_ops && phb->eeh_ops->post_init) { | 253 | /* |
106 | ret = phb->eeh_ops->post_init(hose); | 254 | * If EEH is enabled, we're going to rely on that. |
107 | if (ret) | 255 | * Otherwise, we restore to conventional mechanism |
108 | break; | 256 | * to clear frozen PE during PCI config access. |
109 | } | 257 | */ |
258 | if (eeh_enabled()) | ||
259 | phb->flags |= PNV_PHB_FLAG_EEH; | ||
260 | else | ||
261 | phb->flags &= ~PNV_PHB_FLAG_EEH; | ||
262 | |||
263 | /* Create debugfs entries */ | ||
264 | #ifdef CONFIG_DEBUG_FS | ||
265 | if (phb->has_dbgfs || !phb->dbgfs) | ||
266 | continue; | ||
267 | |||
268 | phb->has_dbgfs = 1; | ||
269 | debugfs_create_file("err_injct", 0200, | ||
270 | phb->dbgfs, hose, | ||
271 | &pnv_eeh_ei_fops); | ||
272 | |||
273 | debugfs_create_file("err_injct_outbound", 0600, | ||
274 | phb->dbgfs, hose, | ||
275 | &pnv_eeh_outb_dbgfs_ops); | ||
276 | debugfs_create_file("err_injct_inboundA", 0600, | ||
277 | phb->dbgfs, hose, | ||
278 | &pnv_eeh_inbA_dbgfs_ops); | ||
279 | debugfs_create_file("err_injct_inboundB", 0600, | ||
280 | phb->dbgfs, hose, | ||
281 | &pnv_eeh_inbB_dbgfs_ops); | ||
282 | #endif /* CONFIG_DEBUG_FS */ | ||
110 | } | 283 | } |
111 | 284 | ||
285 | |||
112 | return ret; | 286 | return ret; |
113 | } | 287 | } |
114 | 288 | ||
289 | static int pnv_eeh_cap_start(struct pci_dn *pdn) | ||
290 | { | ||
291 | u32 status; | ||
292 | |||
293 | if (!pdn) | ||
294 | return 0; | ||
295 | |||
296 | pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status); | ||
297 | if (!(status & PCI_STATUS_CAP_LIST)) | ||
298 | return 0; | ||
299 | |||
300 | return PCI_CAPABILITY_LIST; | ||
301 | } | ||
302 | |||
303 | static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap) | ||
304 | { | ||
305 | int pos = pnv_eeh_cap_start(pdn); | ||
306 | int cnt = 48; /* Maximal number of capabilities */ | ||
307 | u32 id; | ||
308 | |||
309 | if (!pos) | ||
310 | return 0; | ||
311 | |||
312 | while (cnt--) { | ||
313 | pnv_pci_cfg_read(pdn, pos, 1, &pos); | ||
314 | if (pos < 0x40) | ||
315 | break; | ||
316 | |||
317 | pos &= ~3; | ||
318 | pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id); | ||
319 | if (id == 0xff) | ||
320 | break; | ||
321 | |||
322 | /* Found */ | ||
323 | if (id == cap) | ||
324 | return pos; | ||
325 | |||
326 | /* Next one */ | ||
327 | pos += PCI_CAP_LIST_NEXT; | ||
328 | } | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap) | ||
334 | { | ||
335 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); | ||
336 | u32 header; | ||
337 | int pos = 256, ttl = (4096 - 256) / 8; | ||
338 | |||
339 | if (!edev || !edev->pcie_cap) | ||
340 | return 0; | ||
341 | if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) | ||
342 | return 0; | ||
343 | else if (!header) | ||
344 | return 0; | ||
345 | |||
346 | while (ttl-- > 0) { | ||
347 | if (PCI_EXT_CAP_ID(header) == cap && pos) | ||
348 | return pos; | ||
349 | |||
350 | pos = PCI_EXT_CAP_NEXT(header); | ||
351 | if (pos < 256) | ||
352 | break; | ||
353 | |||
354 | if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) | ||
355 | break; | ||
356 | } | ||
357 | |||
358 | return 0; | ||
359 | } | ||
360 | |||
115 | /** | 361 | /** |
116 | * powernv_eeh_dev_probe - Do probe on PCI device | 362 | * pnv_eeh_probe - Do probe on PCI device |
117 | * @dev: PCI device | 363 | * @pdn: PCI device node |
118 | * @flag: unused | 364 | * @data: unused |
119 | * | 365 | * |
120 | * When EEH module is installed during system boot, all PCI devices | 366 | * When EEH module is installed during system boot, all PCI devices |
121 | * are checked one by one to see if it supports EEH. The function | 367 | * are checked one by one to see if it supports EEH. The function |
@@ -129,12 +375,12 @@ static int powernv_eeh_post_init(void) | |||
129 | * was possiblly triggered by EEH core, the binding between EEH device | 375 | * was possiblly triggered by EEH core, the binding between EEH device |
130 | * and the PCI device isn't built yet. | 376 | * and the PCI device isn't built yet. |
131 | */ | 377 | */ |
132 | static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) | 378 | static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) |
133 | { | 379 | { |
134 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | 380 | struct pci_controller *hose = pdn->phb; |
135 | struct pnv_phb *phb = hose->private_data; | 381 | struct pnv_phb *phb = hose->private_data; |
136 | struct device_node *dn = pci_device_to_OF_node(dev); | 382 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); |
137 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | 383 | uint32_t pcie_flags; |
138 | int ret; | 384 | int ret; |
139 | 385 | ||
140 | /* | 386 | /* |
@@ -143,40 +389,42 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) | |||
143 | * the root bridge. So it's not reasonable to continue | 389 | * the root bridge. So it's not reasonable to continue |
144 | * the probing. | 390 | * the probing. |
145 | */ | 391 | */ |
146 | if (!dn || !edev || edev->pe) | 392 | if (!edev || edev->pe) |
147 | return 0; | 393 | return NULL; |
148 | 394 | ||
149 | /* Skip for PCI-ISA bridge */ | 395 | /* Skip for PCI-ISA bridge */ |
150 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA) | 396 | if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA) |
151 | return 0; | 397 | return NULL; |
152 | 398 | ||
153 | /* Initialize eeh device */ | 399 | /* Initialize eeh device */ |
154 | edev->class_code = dev->class; | 400 | edev->class_code = pdn->class_code; |
155 | edev->mode &= 0xFFFFFF00; | 401 | edev->mode &= 0xFFFFFF00; |
156 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) | 402 | edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX); |
403 | edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP); | ||
404 | edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR); | ||
405 | if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
157 | edev->mode |= EEH_DEV_BRIDGE; | 406 | edev->mode |= EEH_DEV_BRIDGE; |
158 | edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 407 | if (edev->pcie_cap) { |
159 | if (pci_is_pcie(dev)) { | 408 | pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS, |
160 | edev->pcie_cap = pci_pcie_cap(dev); | 409 | 2, &pcie_flags); |
161 | 410 | pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4; | |
162 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) | 411 | if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT) |
163 | edev->mode |= EEH_DEV_ROOT_PORT; | 412 | edev->mode |= EEH_DEV_ROOT_PORT; |
164 | else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) | 413 | else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM) |
165 | edev->mode |= EEH_DEV_DS_PORT; | 414 | edev->mode |= EEH_DEV_DS_PORT; |
166 | 415 | } | |
167 | edev->aer_cap = pci_find_ext_capability(dev, | ||
168 | PCI_EXT_CAP_ID_ERR); | ||
169 | } | 416 | } |
170 | 417 | ||
171 | edev->config_addr = ((dev->bus->number << 8) | dev->devfn); | 418 | edev->config_addr = (pdn->busno << 8) | (pdn->devfn); |
172 | edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff); | 419 | edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr]; |
173 | 420 | ||
174 | /* Create PE */ | 421 | /* Create PE */ |
175 | ret = eeh_add_to_parent_pe(edev); | 422 | ret = eeh_add_to_parent_pe(edev); |
176 | if (ret) { | 423 | if (ret) { |
177 | pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n", | 424 | pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n", |
178 | __func__, pci_name(dev), ret); | 425 | __func__, hose->global_number, pdn->busno, |
179 | return ret; | 426 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret); |
427 | return NULL; | ||
180 | } | 428 | } |
181 | 429 | ||
182 | /* | 430 | /* |
@@ -195,8 +443,10 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) | |||
195 | * Broadcom Austin 4-ports NICs (14e4:1657) | 443 | * Broadcom Austin 4-ports NICs (14e4:1657) |
196 | * Broadcom Shiner 2-ports 10G NICs (14e4:168e) | 444 | * Broadcom Shiner 2-ports 10G NICs (14e4:168e) |
197 | */ | 445 | */ |
198 | if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) || | 446 | if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && |
199 | (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e)) | 447 | pdn->device_id == 0x1657) || |
448 | (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && | ||
449 | pdn->device_id == 0x168e)) | ||
200 | edev->pe->state |= EEH_PE_CFG_RESTRICTED; | 450 | edev->pe->state |= EEH_PE_CFG_RESTRICTED; |
201 | 451 | ||
202 | /* | 452 | /* |
@@ -206,7 +456,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) | |||
206 | * to PE reset. | 456 | * to PE reset. |
207 | */ | 457 | */ |
208 | if (!edev->pe->bus) | 458 | if (!edev->pe->bus) |
209 | edev->pe->bus = dev->bus; | 459 | edev->pe->bus = pci_find_bus(hose->global_number, |
460 | pdn->busno); | ||
210 | 461 | ||
211 | /* | 462 | /* |
212 | * Enable EEH explicitly so that we will do EEH check | 463 | * Enable EEH explicitly so that we will do EEH check |
@@ -217,11 +468,11 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) | |||
217 | /* Save memory bars */ | 468 | /* Save memory bars */ |
218 | eeh_save_bars(edev); | 469 | eeh_save_bars(edev); |
219 | 470 | ||
220 | return 0; | 471 | return NULL; |
221 | } | 472 | } |
222 | 473 | ||
223 | /** | 474 | /** |
224 | * powernv_eeh_set_option - Initialize EEH or MMIO/DMA reenable | 475 | * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable |
225 | * @pe: EEH PE | 476 | * @pe: EEH PE |
226 | * @option: operation to be issued | 477 | * @option: operation to be issued |
227 | * | 478 | * |
@@ -229,36 +480,236 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) | |||
229 | * Currently, following options are support according to PAPR: | 480 | * Currently, following options are support according to PAPR: |
230 | * Enable EEH, Disable EEH, Enable MMIO and Enable DMA | 481 | * Enable EEH, Disable EEH, Enable MMIO and Enable DMA |
231 | */ | 482 | */ |
232 | static int powernv_eeh_set_option(struct eeh_pe *pe, int option) | 483 | static int pnv_eeh_set_option(struct eeh_pe *pe, int option) |
233 | { | 484 | { |
234 | struct pci_controller *hose = pe->phb; | 485 | struct pci_controller *hose = pe->phb; |
235 | struct pnv_phb *phb = hose->private_data; | 486 | struct pnv_phb *phb = hose->private_data; |
236 | int ret = -EEXIST; | 487 | bool freeze_pe = false; |
488 | int opt, ret = 0; | ||
489 | s64 rc; | ||
490 | |||
491 | /* Sanity check on option */ | ||
492 | switch (option) { | ||
493 | case EEH_OPT_DISABLE: | ||
494 | return -EPERM; | ||
495 | case EEH_OPT_ENABLE: | ||
496 | return 0; | ||
497 | case EEH_OPT_THAW_MMIO: | ||
498 | opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO; | ||
499 | break; | ||
500 | case EEH_OPT_THAW_DMA: | ||
501 | opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA; | ||
502 | break; | ||
503 | case EEH_OPT_FREEZE_PE: | ||
504 | freeze_pe = true; | ||
505 | opt = OPAL_EEH_ACTION_SET_FREEZE_ALL; | ||
506 | break; | ||
507 | default: | ||
508 | pr_warn("%s: Invalid option %d\n", __func__, option); | ||
509 | return -EINVAL; | ||
510 | } | ||
237 | 511 | ||
238 | /* | 512 | /* If PHB supports compound PE, to handle it */ |
239 | * What we need do is pass it down for hardware | 513 | if (freeze_pe) { |
240 | * implementation to handle it. | 514 | if (phb->freeze_pe) { |
241 | */ | 515 | phb->freeze_pe(phb, pe->addr); |
242 | if (phb->eeh_ops && phb->eeh_ops->set_option) | 516 | } else { |
243 | ret = phb->eeh_ops->set_option(pe, option); | 517 | rc = opal_pci_eeh_freeze_set(phb->opal_id, |
518 | pe->addr, opt); | ||
519 | if (rc != OPAL_SUCCESS) { | ||
520 | pr_warn("%s: Failure %lld freezing " | ||
521 | "PHB#%x-PE#%x\n", | ||
522 | __func__, rc, | ||
523 | phb->hose->global_number, pe->addr); | ||
524 | ret = -EIO; | ||
525 | } | ||
526 | } | ||
527 | } else { | ||
528 | if (phb->unfreeze_pe) { | ||
529 | ret = phb->unfreeze_pe(phb, pe->addr, opt); | ||
530 | } else { | ||
531 | rc = opal_pci_eeh_freeze_clear(phb->opal_id, | ||
532 | pe->addr, opt); | ||
533 | if (rc != OPAL_SUCCESS) { | ||
534 | pr_warn("%s: Failure %lld enable %d " | ||
535 | "for PHB#%x-PE#%x\n", | ||
536 | __func__, rc, option, | ||
537 | phb->hose->global_number, pe->addr); | ||
538 | ret = -EIO; | ||
539 | } | ||
540 | } | ||
541 | } | ||
244 | 542 | ||
245 | return ret; | 543 | return ret; |
246 | } | 544 | } |
247 | 545 | ||
248 | /** | 546 | /** |
249 | * powernv_eeh_get_pe_addr - Retrieve PE address | 547 | * pnv_eeh_get_pe_addr - Retrieve PE address |
250 | * @pe: EEH PE | 548 | * @pe: EEH PE |
251 | * | 549 | * |
252 | * Retrieve the PE address according to the given tranditional | 550 | * Retrieve the PE address according to the given tranditional |
253 | * PCI BDF (Bus/Device/Function) address. | 551 | * PCI BDF (Bus/Device/Function) address. |
254 | */ | 552 | */ |
255 | static int powernv_eeh_get_pe_addr(struct eeh_pe *pe) | 553 | static int pnv_eeh_get_pe_addr(struct eeh_pe *pe) |
256 | { | 554 | { |
257 | return pe->addr; | 555 | return pe->addr; |
258 | } | 556 | } |
259 | 557 | ||
558 | static void pnv_eeh_get_phb_diag(struct eeh_pe *pe) | ||
559 | { | ||
560 | struct pnv_phb *phb = pe->phb->private_data; | ||
561 | s64 rc; | ||
562 | |||
563 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data, | ||
564 | PNV_PCI_DIAG_BUF_SIZE); | ||
565 | if (rc != OPAL_SUCCESS) | ||
566 | pr_warn("%s: Failure %lld getting PHB#%x diag-data\n", | ||
567 | __func__, rc, pe->phb->global_number); | ||
568 | } | ||
569 | |||
570 | static int pnv_eeh_get_phb_state(struct eeh_pe *pe) | ||
571 | { | ||
572 | struct pnv_phb *phb = pe->phb->private_data; | ||
573 | u8 fstate; | ||
574 | __be16 pcierr; | ||
575 | s64 rc; | ||
576 | int result = 0; | ||
577 | |||
578 | rc = opal_pci_eeh_freeze_status(phb->opal_id, | ||
579 | pe->addr, | ||
580 | &fstate, | ||
581 | &pcierr, | ||
582 | NULL); | ||
583 | if (rc != OPAL_SUCCESS) { | ||
584 | pr_warn("%s: Failure %lld getting PHB#%x state\n", | ||
585 | __func__, rc, phb->hose->global_number); | ||
586 | return EEH_STATE_NOT_SUPPORT; | ||
587 | } | ||
588 | |||
589 | /* | ||
590 | * Check PHB state. If the PHB is frozen for the | ||
591 | * first time, to dump the PHB diag-data. | ||
592 | */ | ||
593 | if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) { | ||
594 | result = (EEH_STATE_MMIO_ACTIVE | | ||
595 | EEH_STATE_DMA_ACTIVE | | ||
596 | EEH_STATE_MMIO_ENABLED | | ||
597 | EEH_STATE_DMA_ENABLED); | ||
598 | } else if (!(pe->state & EEH_PE_ISOLATED)) { | ||
599 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
600 | pnv_eeh_get_phb_diag(pe); | ||
601 | |||
602 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
603 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | ||
604 | } | ||
605 | |||
606 | return result; | ||
607 | } | ||
608 | |||
609 | static int pnv_eeh_get_pe_state(struct eeh_pe *pe) | ||
610 | { | ||
611 | struct pnv_phb *phb = pe->phb->private_data; | ||
612 | u8 fstate; | ||
613 | __be16 pcierr; | ||
614 | s64 rc; | ||
615 | int result; | ||
616 | |||
617 | /* | ||
618 | * We don't clobber hardware frozen state until PE | ||
619 | * reset is completed. In order to keep EEH core | ||
620 | * moving forward, we have to return operational | ||
621 | * state during PE reset. | ||
622 | */ | ||
623 | if (pe->state & EEH_PE_RESET) { | ||
624 | result = (EEH_STATE_MMIO_ACTIVE | | ||
625 | EEH_STATE_DMA_ACTIVE | | ||
626 | EEH_STATE_MMIO_ENABLED | | ||
627 | EEH_STATE_DMA_ENABLED); | ||
628 | return result; | ||
629 | } | ||
630 | |||
631 | /* | ||
632 | * Fetch PE state from hardware. If the PHB | ||
633 | * supports compound PE, let it handle that. | ||
634 | */ | ||
635 | if (phb->get_pe_state) { | ||
636 | fstate = phb->get_pe_state(phb, pe->addr); | ||
637 | } else { | ||
638 | rc = opal_pci_eeh_freeze_status(phb->opal_id, | ||
639 | pe->addr, | ||
640 | &fstate, | ||
641 | &pcierr, | ||
642 | NULL); | ||
643 | if (rc != OPAL_SUCCESS) { | ||
644 | pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n", | ||
645 | __func__, rc, phb->hose->global_number, | ||
646 | pe->addr); | ||
647 | return EEH_STATE_NOT_SUPPORT; | ||
648 | } | ||
649 | } | ||
650 | |||
651 | /* Figure out state */ | ||
652 | switch (fstate) { | ||
653 | case OPAL_EEH_STOPPED_NOT_FROZEN: | ||
654 | result = (EEH_STATE_MMIO_ACTIVE | | ||
655 | EEH_STATE_DMA_ACTIVE | | ||
656 | EEH_STATE_MMIO_ENABLED | | ||
657 | EEH_STATE_DMA_ENABLED); | ||
658 | break; | ||
659 | case OPAL_EEH_STOPPED_MMIO_FREEZE: | ||
660 | result = (EEH_STATE_DMA_ACTIVE | | ||
661 | EEH_STATE_DMA_ENABLED); | ||
662 | break; | ||
663 | case OPAL_EEH_STOPPED_DMA_FREEZE: | ||
664 | result = (EEH_STATE_MMIO_ACTIVE | | ||
665 | EEH_STATE_MMIO_ENABLED); | ||
666 | break; | ||
667 | case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE: | ||
668 | result = 0; | ||
669 | break; | ||
670 | case OPAL_EEH_STOPPED_RESET: | ||
671 | result = EEH_STATE_RESET_ACTIVE; | ||
672 | break; | ||
673 | case OPAL_EEH_STOPPED_TEMP_UNAVAIL: | ||
674 | result = EEH_STATE_UNAVAILABLE; | ||
675 | break; | ||
676 | case OPAL_EEH_STOPPED_PERM_UNAVAIL: | ||
677 | result = EEH_STATE_NOT_SUPPORT; | ||
678 | break; | ||
679 | default: | ||
680 | result = EEH_STATE_NOT_SUPPORT; | ||
681 | pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n", | ||
682 | __func__, phb->hose->global_number, | ||
683 | pe->addr, fstate); | ||
684 | } | ||
685 | |||
686 | /* | ||
687 | * If PHB supports compound PE, to freeze all | ||
688 | * slave PEs for consistency. | ||
689 | * | ||
690 | * If the PE is switching to frozen state for the | ||
691 | * first time, to dump the PHB diag-data. | ||
692 | */ | ||
693 | if (!(result & EEH_STATE_NOT_SUPPORT) && | ||
694 | !(result & EEH_STATE_UNAVAILABLE) && | ||
695 | !(result & EEH_STATE_MMIO_ACTIVE) && | ||
696 | !(result & EEH_STATE_DMA_ACTIVE) && | ||
697 | !(pe->state & EEH_PE_ISOLATED)) { | ||
698 | if (phb->freeze_pe) | ||
699 | phb->freeze_pe(phb, pe->addr); | ||
700 | |||
701 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
702 | pnv_eeh_get_phb_diag(pe); | ||
703 | |||
704 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
705 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | ||
706 | } | ||
707 | |||
708 | return result; | ||
709 | } | ||
710 | |||
260 | /** | 711 | /** |
261 | * powernv_eeh_get_state - Retrieve PE state | 712 | * pnv_eeh_get_state - Retrieve PE state |
262 | * @pe: EEH PE | 713 | * @pe: EEH PE |
263 | * @delay: delay while PE state is temporarily unavailable | 714 | * @delay: delay while PE state is temporarily unavailable |
264 | * | 715 | * |
@@ -267,64 +718,279 @@ static int powernv_eeh_get_pe_addr(struct eeh_pe *pe) | |||
267 | * we prefer passing down to hardware implementation to handle | 718 | * we prefer passing down to hardware implementation to handle |
268 | * it. | 719 | * it. |
269 | */ | 720 | */ |
270 | static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay) | 721 | static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay) |
722 | { | ||
723 | int ret; | ||
724 | |||
725 | if (pe->type & EEH_PE_PHB) | ||
726 | ret = pnv_eeh_get_phb_state(pe); | ||
727 | else | ||
728 | ret = pnv_eeh_get_pe_state(pe); | ||
729 | |||
730 | if (!delay) | ||
731 | return ret; | ||
732 | |||
733 | /* | ||
734 | * If the PE state is temporarily unavailable, | ||
735 | * to inform the EEH core delay for default | ||
736 | * period (1 second) | ||
737 | */ | ||
738 | *delay = 0; | ||
739 | if (ret & EEH_STATE_UNAVAILABLE) | ||
740 | *delay = 1000; | ||
741 | |||
742 | return ret; | ||
743 | } | ||
744 | |||
745 | static s64 pnv_eeh_phb_poll(struct pnv_phb *phb) | ||
746 | { | ||
747 | s64 rc = OPAL_HARDWARE; | ||
748 | |||
749 | while (1) { | ||
750 | rc = opal_pci_poll(phb->opal_id); | ||
751 | if (rc <= 0) | ||
752 | break; | ||
753 | |||
754 | if (system_state < SYSTEM_RUNNING) | ||
755 | udelay(1000 * rc); | ||
756 | else | ||
757 | msleep(rc); | ||
758 | } | ||
759 | |||
760 | return rc; | ||
761 | } | ||
762 | |||
763 | int pnv_eeh_phb_reset(struct pci_controller *hose, int option) | ||
271 | { | 764 | { |
272 | struct pci_controller *hose = pe->phb; | ||
273 | struct pnv_phb *phb = hose->private_data; | 765 | struct pnv_phb *phb = hose->private_data; |
274 | int ret = EEH_STATE_NOT_SUPPORT; | 766 | s64 rc = OPAL_HARDWARE; |
767 | |||
768 | pr_debug("%s: Reset PHB#%x, option=%d\n", | ||
769 | __func__, hose->global_number, option); | ||
770 | |||
771 | /* Issue PHB complete reset request */ | ||
772 | if (option == EEH_RESET_FUNDAMENTAL || | ||
773 | option == EEH_RESET_HOT) | ||
774 | rc = opal_pci_reset(phb->opal_id, | ||
775 | OPAL_RESET_PHB_COMPLETE, | ||
776 | OPAL_ASSERT_RESET); | ||
777 | else if (option == EEH_RESET_DEACTIVATE) | ||
778 | rc = opal_pci_reset(phb->opal_id, | ||
779 | OPAL_RESET_PHB_COMPLETE, | ||
780 | OPAL_DEASSERT_RESET); | ||
781 | if (rc < 0) | ||
782 | goto out; | ||
275 | 783 | ||
276 | if (phb->eeh_ops && phb->eeh_ops->get_state) { | 784 | /* |
277 | ret = phb->eeh_ops->get_state(pe); | 785 | * Poll state of the PHB until the request is done |
786 | * successfully. The PHB reset is usually PHB complete | ||
787 | * reset followed by hot reset on root bus. So we also | ||
788 | * need the PCI bus settlement delay. | ||
789 | */ | ||
790 | rc = pnv_eeh_phb_poll(phb); | ||
791 | if (option == EEH_RESET_DEACTIVATE) { | ||
792 | if (system_state < SYSTEM_RUNNING) | ||
793 | udelay(1000 * EEH_PE_RST_SETTLE_TIME); | ||
794 | else | ||
795 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
796 | } | ||
797 | out: | ||
798 | if (rc != OPAL_SUCCESS) | ||
799 | return -EIO; | ||
278 | 800 | ||
279 | /* | 801 | return 0; |
280 | * If the PE state is temporarily unavailable, | 802 | } |
281 | * to inform the EEH core delay for default | 803 | |
282 | * period (1 second) | 804 | static int pnv_eeh_root_reset(struct pci_controller *hose, int option) |
283 | */ | 805 | { |
284 | if (delay) { | 806 | struct pnv_phb *phb = hose->private_data; |
285 | *delay = 0; | 807 | s64 rc = OPAL_HARDWARE; |
286 | if (ret & EEH_STATE_UNAVAILABLE) | 808 | |
287 | *delay = 1000; | 809 | pr_debug("%s: Reset PHB#%x, option=%d\n", |
810 | __func__, hose->global_number, option); | ||
811 | |||
812 | /* | ||
813 | * During the reset deassert time, we needn't care | ||
814 | * the reset scope because the firmware does nothing | ||
815 | * for fundamental or hot reset during deassert phase. | ||
816 | */ | ||
817 | if (option == EEH_RESET_FUNDAMENTAL) | ||
818 | rc = opal_pci_reset(phb->opal_id, | ||
819 | OPAL_RESET_PCI_FUNDAMENTAL, | ||
820 | OPAL_ASSERT_RESET); | ||
821 | else if (option == EEH_RESET_HOT) | ||
822 | rc = opal_pci_reset(phb->opal_id, | ||
823 | OPAL_RESET_PCI_HOT, | ||
824 | OPAL_ASSERT_RESET); | ||
825 | else if (option == EEH_RESET_DEACTIVATE) | ||
826 | rc = opal_pci_reset(phb->opal_id, | ||
827 | OPAL_RESET_PCI_HOT, | ||
828 | OPAL_DEASSERT_RESET); | ||
829 | if (rc < 0) | ||
830 | goto out; | ||
831 | |||
832 | /* Poll state of the PHB until the request is done */ | ||
833 | rc = pnv_eeh_phb_poll(phb); | ||
834 | if (option == EEH_RESET_DEACTIVATE) | ||
835 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
836 | out: | ||
837 | if (rc != OPAL_SUCCESS) | ||
838 | return -EIO; | ||
839 | |||
840 | return 0; | ||
841 | } | ||
842 | |||
843 | static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option) | ||
844 | { | ||
845 | struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); | ||
846 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); | ||
847 | int aer = edev ? edev->aer_cap : 0; | ||
848 | u32 ctrl; | ||
849 | |||
850 | pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n", | ||
851 | __func__, pci_domain_nr(dev->bus), | ||
852 | dev->bus->number, option); | ||
853 | |||
854 | switch (option) { | ||
855 | case EEH_RESET_FUNDAMENTAL: | ||
856 | case EEH_RESET_HOT: | ||
857 | /* Don't report linkDown event */ | ||
858 | if (aer) { | ||
859 | eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK, | ||
860 | 4, &ctrl); | ||
861 | ctrl |= PCI_ERR_UNC_SURPDN; | ||
862 | eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK, | ||
863 | 4, ctrl); | ||
288 | } | 864 | } |
865 | |||
866 | eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl); | ||
867 | ctrl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
868 | eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl); | ||
869 | |||
870 | msleep(EEH_PE_RST_HOLD_TIME); | ||
871 | break; | ||
872 | case EEH_RESET_DEACTIVATE: | ||
873 | eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl); | ||
874 | ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
875 | eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl); | ||
876 | |||
877 | msleep(EEH_PE_RST_SETTLE_TIME); | ||
878 | |||
879 | /* Continue reporting linkDown event */ | ||
880 | if (aer) { | ||
881 | eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK, | ||
882 | 4, &ctrl); | ||
883 | ctrl &= ~PCI_ERR_UNC_SURPDN; | ||
884 | eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK, | ||
885 | 4, ctrl); | ||
886 | } | ||
887 | |||
888 | break; | ||
289 | } | 889 | } |
290 | 890 | ||
291 | return ret; | 891 | return 0; |
892 | } | ||
893 | |||
894 | void pnv_pci_reset_secondary_bus(struct pci_dev *dev) | ||
895 | { | ||
896 | struct pci_controller *hose; | ||
897 | |||
898 | if (pci_is_root_bus(dev->bus)) { | ||
899 | hose = pci_bus_to_host(dev->bus); | ||
900 | pnv_eeh_root_reset(hose, EEH_RESET_HOT); | ||
901 | pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE); | ||
902 | } else { | ||
903 | pnv_eeh_bridge_reset(dev, EEH_RESET_HOT); | ||
904 | pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE); | ||
905 | } | ||
292 | } | 906 | } |
293 | 907 | ||
294 | /** | 908 | /** |
295 | * powernv_eeh_reset - Reset the specified PE | 909 | * pnv_eeh_reset - Reset the specified PE |
296 | * @pe: EEH PE | 910 | * @pe: EEH PE |
297 | * @option: reset option | 911 | * @option: reset option |
298 | * | 912 | * |
299 | * Reset the specified PE | 913 | * Do reset on the indicated PE. For PCI bus sensitive PE, |
914 | * we need to reset the parent p2p bridge. The PHB has to | ||
915 | * be reinitialized if the p2p bridge is root bridge. For | ||
916 | * PCI device sensitive PE, we will try to reset the device | ||
917 | * through FLR. For now, we don't have OPAL APIs to do HARD | ||
918 | * reset yet, so all reset would be SOFT (HOT) reset. | ||
300 | */ | 919 | */ |
301 | static int powernv_eeh_reset(struct eeh_pe *pe, int option) | 920 | static int pnv_eeh_reset(struct eeh_pe *pe, int option) |
302 | { | 921 | { |
303 | struct pci_controller *hose = pe->phb; | 922 | struct pci_controller *hose = pe->phb; |
304 | struct pnv_phb *phb = hose->private_data; | 923 | struct pci_bus *bus; |
305 | int ret = -EEXIST; | 924 | int ret; |
925 | |||
926 | /* | ||
927 | * For PHB reset, we always have complete reset. For those PEs whose | ||
928 | * primary bus derived from root complex (root bus) or root port | ||
929 | * (usually bus#1), we apply hot or fundamental reset on the root port. | ||
930 | * For other PEs, we always have hot reset on the PE primary bus. | ||
931 | * | ||
932 | * Here, we have different design to pHyp, which always clear the | ||
933 | * frozen state during PE reset. However, the good idea here from | ||
934 | * benh is to keep frozen state before we get PE reset done completely | ||
935 | * (until BAR restore). With the frozen state, HW drops illegal IO | ||
936 | * or MMIO access, which can incur recrusive frozen PE during PE | ||
937 | * reset. The side effect is that EEH core has to clear the frozen | ||
938 | * state explicitly after BAR restore. | ||
939 | */ | ||
940 | if (pe->type & EEH_PE_PHB) { | ||
941 | ret = pnv_eeh_phb_reset(hose, option); | ||
942 | } else { | ||
943 | struct pnv_phb *phb; | ||
944 | s64 rc; | ||
306 | 945 | ||
307 | if (phb->eeh_ops && phb->eeh_ops->reset) | 946 | /* |
308 | ret = phb->eeh_ops->reset(pe, option); | 947 | * The frozen PE might be caused by PAPR error injection |
948 | * registers, which are expected to be cleared after hitting | ||
949 | * frozen PE as stated in the hardware spec. Unfortunately, | ||
950 | * that's not true on P7IOC. So we have to clear it manually | ||
951 | * to avoid recursive EEH errors during recovery. | ||
952 | */ | ||
953 | phb = hose->private_data; | ||
954 | if (phb->model == PNV_PHB_MODEL_P7IOC && | ||
955 | (option == EEH_RESET_HOT || | ||
956 | option == EEH_RESET_FUNDAMENTAL)) { | ||
957 | rc = opal_pci_reset(phb->opal_id, | ||
958 | OPAL_RESET_PHB_ERROR, | ||
959 | OPAL_ASSERT_RESET); | ||
960 | if (rc != OPAL_SUCCESS) { | ||
961 | pr_warn("%s: Failure %lld clearing " | ||
962 | "error injection registers\n", | ||
963 | __func__, rc); | ||
964 | return -EIO; | ||
965 | } | ||
966 | } | ||
967 | |||
968 | bus = eeh_pe_bus_get(pe); | ||
969 | if (pci_is_root_bus(bus) || | ||
970 | pci_is_root_bus(bus->parent)) | ||
971 | ret = pnv_eeh_root_reset(hose, option); | ||
972 | else | ||
973 | ret = pnv_eeh_bridge_reset(bus->self, option); | ||
974 | } | ||
309 | 975 | ||
310 | return ret; | 976 | return ret; |
311 | } | 977 | } |
312 | 978 | ||
313 | /** | 979 | /** |
314 | * powernv_eeh_wait_state - Wait for PE state | 980 | * pnv_eeh_wait_state - Wait for PE state |
315 | * @pe: EEH PE | 981 | * @pe: EEH PE |
316 | * @max_wait: maximal period in microsecond | 982 | * @max_wait: maximal period in microsecond |
317 | * | 983 | * |
318 | * Wait for the state of associated PE. It might take some time | 984 | * Wait for the state of associated PE. It might take some time |
319 | * to retrieve the PE's state. | 985 | * to retrieve the PE's state. |
320 | */ | 986 | */ |
321 | static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait) | 987 | static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait) |
322 | { | 988 | { |
323 | int ret; | 989 | int ret; |
324 | int mwait; | 990 | int mwait; |
325 | 991 | ||
326 | while (1) { | 992 | while (1) { |
327 | ret = powernv_eeh_get_state(pe, &mwait); | 993 | ret = pnv_eeh_get_state(pe, &mwait); |
328 | 994 | ||
329 | /* | 995 | /* |
330 | * If the PE's state is temporarily unavailable, | 996 | * If the PE's state is temporarily unavailable, |
@@ -348,7 +1014,7 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait) | |||
348 | } | 1014 | } |
349 | 1015 | ||
350 | /** | 1016 | /** |
351 | * powernv_eeh_get_log - Retrieve error log | 1017 | * pnv_eeh_get_log - Retrieve error log |
352 | * @pe: EEH PE | 1018 | * @pe: EEH PE |
353 | * @severity: temporary or permanent error log | 1019 | * @severity: temporary or permanent error log |
354 | * @drv_log: driver log to be combined with retrieved error log | 1020 | * @drv_log: driver log to be combined with retrieved error log |
@@ -356,41 +1022,30 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait) | |||
356 | * | 1022 | * |
357 | * Retrieve the temporary or permanent error from the PE. | 1023 | * Retrieve the temporary or permanent error from the PE. |
358 | */ | 1024 | */ |
359 | static int powernv_eeh_get_log(struct eeh_pe *pe, int severity, | 1025 | static int pnv_eeh_get_log(struct eeh_pe *pe, int severity, |
360 | char *drv_log, unsigned long len) | 1026 | char *drv_log, unsigned long len) |
361 | { | 1027 | { |
362 | struct pci_controller *hose = pe->phb; | 1028 | if (!eeh_has_flag(EEH_EARLY_DUMP_LOG)) |
363 | struct pnv_phb *phb = hose->private_data; | 1029 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); |
364 | int ret = -EEXIST; | ||
365 | 1030 | ||
366 | if (phb->eeh_ops && phb->eeh_ops->get_log) | 1031 | return 0; |
367 | ret = phb->eeh_ops->get_log(pe, severity, drv_log, len); | ||
368 | |||
369 | return ret; | ||
370 | } | 1032 | } |
371 | 1033 | ||
372 | /** | 1034 | /** |
373 | * powernv_eeh_configure_bridge - Configure PCI bridges in the indicated PE | 1035 | * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE |
374 | * @pe: EEH PE | 1036 | * @pe: EEH PE |
375 | * | 1037 | * |
376 | * The function will be called to reconfigure the bridges included | 1038 | * The function will be called to reconfigure the bridges included |
377 | * in the specified PE so that the mulfunctional PE would be recovered | 1039 | * in the specified PE so that the mulfunctional PE would be recovered |
378 | * again. | 1040 | * again. |
379 | */ | 1041 | */ |
380 | static int powernv_eeh_configure_bridge(struct eeh_pe *pe) | 1042 | static int pnv_eeh_configure_bridge(struct eeh_pe *pe) |
381 | { | 1043 | { |
382 | struct pci_controller *hose = pe->phb; | 1044 | return 0; |
383 | struct pnv_phb *phb = hose->private_data; | ||
384 | int ret = 0; | ||
385 | |||
386 | if (phb->eeh_ops && phb->eeh_ops->configure_bridge) | ||
387 | ret = phb->eeh_ops->configure_bridge(pe); | ||
388 | |||
389 | return ret; | ||
390 | } | 1045 | } |
391 | 1046 | ||
392 | /** | 1047 | /** |
393 | * powernv_pe_err_inject - Inject specified error to the indicated PE | 1048 | * pnv_pe_err_inject - Inject specified error to the indicated PE |
394 | * @pe: the indicated PE | 1049 | * @pe: the indicated PE |
395 | * @type: error type | 1050 | * @type: error type |
396 | * @func: specific error type | 1051 | * @func: specific error type |
@@ -401,22 +1056,52 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe) | |||
401 | * determined by @type and @func, to the indicated PE for | 1056 | * determined by @type and @func, to the indicated PE for |
402 | * testing purpose. | 1057 | * testing purpose. |
403 | */ | 1058 | */ |
404 | static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func, | 1059 | static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func, |
405 | unsigned long addr, unsigned long mask) | 1060 | unsigned long addr, unsigned long mask) |
406 | { | 1061 | { |
407 | struct pci_controller *hose = pe->phb; | 1062 | struct pci_controller *hose = pe->phb; |
408 | struct pnv_phb *phb = hose->private_data; | 1063 | struct pnv_phb *phb = hose->private_data; |
409 | int ret = -EEXIST; | 1064 | s64 rc; |
1065 | |||
1066 | /* Sanity check on error type */ | ||
1067 | if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR && | ||
1068 | type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) { | ||
1069 | pr_warn("%s: Invalid error type %d\n", | ||
1070 | __func__, type); | ||
1071 | return -ERANGE; | ||
1072 | } | ||
410 | 1073 | ||
411 | if (phb->eeh_ops && phb->eeh_ops->err_inject) | 1074 | if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR || |
412 | ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask); | 1075 | func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) { |
1076 | pr_warn("%s: Invalid error function %d\n", | ||
1077 | __func__, func); | ||
1078 | return -ERANGE; | ||
1079 | } | ||
413 | 1080 | ||
414 | return ret; | 1081 | /* Firmware supports error injection ? */ |
1082 | if (!opal_check_token(OPAL_PCI_ERR_INJECT)) { | ||
1083 | pr_warn("%s: Firmware doesn't support error injection\n", | ||
1084 | __func__); | ||
1085 | return -ENXIO; | ||
1086 | } | ||
1087 | |||
1088 | /* Do error injection */ | ||
1089 | rc = opal_pci_err_inject(phb->opal_id, pe->addr, | ||
1090 | type, func, addr, mask); | ||
1091 | if (rc != OPAL_SUCCESS) { | ||
1092 | pr_warn("%s: Failure %lld injecting error " | ||
1093 | "%d-%d to PHB#%x-PE#%x\n", | ||
1094 | __func__, rc, type, func, | ||
1095 | hose->global_number, pe->addr); | ||
1096 | return -EIO; | ||
1097 | } | ||
1098 | |||
1099 | return 0; | ||
415 | } | 1100 | } |
416 | 1101 | ||
417 | static inline bool powernv_eeh_cfg_blocked(struct device_node *dn) | 1102 | static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn) |
418 | { | 1103 | { |
419 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | 1104 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); |
420 | 1105 | ||
421 | if (!edev || !edev->pe) | 1106 | if (!edev || !edev->pe) |
422 | return false; | 1107 | return false; |
@@ -427,51 +1112,377 @@ static inline bool powernv_eeh_cfg_blocked(struct device_node *dn) | |||
427 | return false; | 1112 | return false; |
428 | } | 1113 | } |
429 | 1114 | ||
430 | static int powernv_eeh_read_config(struct device_node *dn, | 1115 | static int pnv_eeh_read_config(struct pci_dn *pdn, |
431 | int where, int size, u32 *val) | 1116 | int where, int size, u32 *val) |
432 | { | 1117 | { |
433 | if (powernv_eeh_cfg_blocked(dn)) { | 1118 | if (!pdn) |
1119 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
1120 | |||
1121 | if (pnv_eeh_cfg_blocked(pdn)) { | ||
434 | *val = 0xFFFFFFFF; | 1122 | *val = 0xFFFFFFFF; |
435 | return PCIBIOS_SET_FAILED; | 1123 | return PCIBIOS_SET_FAILED; |
436 | } | 1124 | } |
437 | 1125 | ||
438 | return pnv_pci_cfg_read(dn, where, size, val); | 1126 | return pnv_pci_cfg_read(pdn, where, size, val); |
439 | } | 1127 | } |
440 | 1128 | ||
441 | static int powernv_eeh_write_config(struct device_node *dn, | 1129 | static int pnv_eeh_write_config(struct pci_dn *pdn, |
442 | int where, int size, u32 val) | 1130 | int where, int size, u32 val) |
443 | { | 1131 | { |
444 | if (powernv_eeh_cfg_blocked(dn)) | 1132 | if (!pdn) |
1133 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
1134 | |||
1135 | if (pnv_eeh_cfg_blocked(pdn)) | ||
445 | return PCIBIOS_SET_FAILED; | 1136 | return PCIBIOS_SET_FAILED; |
446 | 1137 | ||
447 | return pnv_pci_cfg_write(dn, where, size, val); | 1138 | return pnv_pci_cfg_write(pdn, where, size, val); |
1139 | } | ||
1140 | |||
1141 | static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data) | ||
1142 | { | ||
1143 | /* GEM */ | ||
1144 | if (data->gemXfir || data->gemRfir || | ||
1145 | data->gemRirqfir || data->gemMask || data->gemRwof) | ||
1146 | pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n", | ||
1147 | be64_to_cpu(data->gemXfir), | ||
1148 | be64_to_cpu(data->gemRfir), | ||
1149 | be64_to_cpu(data->gemRirqfir), | ||
1150 | be64_to_cpu(data->gemMask), | ||
1151 | be64_to_cpu(data->gemRwof)); | ||
1152 | |||
1153 | /* LEM */ | ||
1154 | if (data->lemFir || data->lemErrMask || | ||
1155 | data->lemAction0 || data->lemAction1 || data->lemWof) | ||
1156 | pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n", | ||
1157 | be64_to_cpu(data->lemFir), | ||
1158 | be64_to_cpu(data->lemErrMask), | ||
1159 | be64_to_cpu(data->lemAction0), | ||
1160 | be64_to_cpu(data->lemAction1), | ||
1161 | be64_to_cpu(data->lemWof)); | ||
1162 | } | ||
1163 | |||
1164 | static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose) | ||
1165 | { | ||
1166 | struct pnv_phb *phb = hose->private_data; | ||
1167 | struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; | ||
1168 | long rc; | ||
1169 | |||
1170 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); | ||
1171 | if (rc != OPAL_SUCCESS) { | ||
1172 | pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n", | ||
1173 | __func__, phb->hub_id, rc); | ||
1174 | return; | ||
1175 | } | ||
1176 | |||
1177 | switch (data->type) { | ||
1178 | case OPAL_P7IOC_DIAG_TYPE_RGC: | ||
1179 | pr_info("P7IOC diag-data for RGC\n\n"); | ||
1180 | pnv_eeh_dump_hub_diag_common(data); | ||
1181 | if (data->rgc.rgcStatus || data->rgc.rgcLdcp) | ||
1182 | pr_info(" RGC: %016llx %016llx\n", | ||
1183 | be64_to_cpu(data->rgc.rgcStatus), | ||
1184 | be64_to_cpu(data->rgc.rgcLdcp)); | ||
1185 | break; | ||
1186 | case OPAL_P7IOC_DIAG_TYPE_BI: | ||
1187 | pr_info("P7IOC diag-data for BI %s\n\n", | ||
1188 | data->bi.biDownbound ? "Downbound" : "Upbound"); | ||
1189 | pnv_eeh_dump_hub_diag_common(data); | ||
1190 | if (data->bi.biLdcp0 || data->bi.biLdcp1 || | ||
1191 | data->bi.biLdcp2 || data->bi.biFenceStatus) | ||
1192 | pr_info(" BI: %016llx %016llx %016llx %016llx\n", | ||
1193 | be64_to_cpu(data->bi.biLdcp0), | ||
1194 | be64_to_cpu(data->bi.biLdcp1), | ||
1195 | be64_to_cpu(data->bi.biLdcp2), | ||
1196 | be64_to_cpu(data->bi.biFenceStatus)); | ||
1197 | break; | ||
1198 | case OPAL_P7IOC_DIAG_TYPE_CI: | ||
1199 | pr_info("P7IOC diag-data for CI Port %d\n\n", | ||
1200 | data->ci.ciPort); | ||
1201 | pnv_eeh_dump_hub_diag_common(data); | ||
1202 | if (data->ci.ciPortStatus || data->ci.ciPortLdcp) | ||
1203 | pr_info(" CI: %016llx %016llx\n", | ||
1204 | be64_to_cpu(data->ci.ciPortStatus), | ||
1205 | be64_to_cpu(data->ci.ciPortLdcp)); | ||
1206 | break; | ||
1207 | case OPAL_P7IOC_DIAG_TYPE_MISC: | ||
1208 | pr_info("P7IOC diag-data for MISC\n\n"); | ||
1209 | pnv_eeh_dump_hub_diag_common(data); | ||
1210 | break; | ||
1211 | case OPAL_P7IOC_DIAG_TYPE_I2C: | ||
1212 | pr_info("P7IOC diag-data for I2C\n\n"); | ||
1213 | pnv_eeh_dump_hub_diag_common(data); | ||
1214 | break; | ||
1215 | default: | ||
1216 | pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n", | ||
1217 | __func__, phb->hub_id, data->type); | ||
1218 | } | ||
1219 | } | ||
1220 | |||
1221 | static int pnv_eeh_get_pe(struct pci_controller *hose, | ||
1222 | u16 pe_no, struct eeh_pe **pe) | ||
1223 | { | ||
1224 | struct pnv_phb *phb = hose->private_data; | ||
1225 | struct pnv_ioda_pe *pnv_pe; | ||
1226 | struct eeh_pe *dev_pe; | ||
1227 | struct eeh_dev edev; | ||
1228 | |||
1229 | /* | ||
1230 | * If PHB supports compound PE, to fetch | ||
1231 | * the master PE because slave PE is invisible | ||
1232 | * to EEH core. | ||
1233 | */ | ||
1234 | pnv_pe = &phb->ioda.pe_array[pe_no]; | ||
1235 | if (pnv_pe->flags & PNV_IODA_PE_SLAVE) { | ||
1236 | pnv_pe = pnv_pe->master; | ||
1237 | WARN_ON(!pnv_pe || | ||
1238 | !(pnv_pe->flags & PNV_IODA_PE_MASTER)); | ||
1239 | pe_no = pnv_pe->pe_number; | ||
1240 | } | ||
1241 | |||
1242 | /* Find the PE according to PE# */ | ||
1243 | memset(&edev, 0, sizeof(struct eeh_dev)); | ||
1244 | edev.phb = hose; | ||
1245 | edev.pe_config_addr = pe_no; | ||
1246 | dev_pe = eeh_pe_get(&edev); | ||
1247 | if (!dev_pe) | ||
1248 | return -EEXIST; | ||
1249 | |||
1250 | /* Freeze the (compound) PE */ | ||
1251 | *pe = dev_pe; | ||
1252 | if (!(dev_pe->state & EEH_PE_ISOLATED)) | ||
1253 | phb->freeze_pe(phb, pe_no); | ||
1254 | |||
1255 | /* | ||
1256 | * At this point, we're sure the (compound) PE should | ||
1257 | * have been frozen. However, we still need poke until | ||
1258 | * hitting the frozen PE on top level. | ||
1259 | */ | ||
1260 | dev_pe = dev_pe->parent; | ||
1261 | while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) { | ||
1262 | int ret; | ||
1263 | int active_flags = (EEH_STATE_MMIO_ACTIVE | | ||
1264 | EEH_STATE_DMA_ACTIVE); | ||
1265 | |||
1266 | ret = eeh_ops->get_state(dev_pe, NULL); | ||
1267 | if (ret <= 0 || (ret & active_flags) == active_flags) { | ||
1268 | dev_pe = dev_pe->parent; | ||
1269 | continue; | ||
1270 | } | ||
1271 | |||
1272 | /* Frozen parent PE */ | ||
1273 | *pe = dev_pe; | ||
1274 | if (!(dev_pe->state & EEH_PE_ISOLATED)) | ||
1275 | phb->freeze_pe(phb, dev_pe->addr); | ||
1276 | |||
1277 | /* Next one */ | ||
1278 | dev_pe = dev_pe->parent; | ||
1279 | } | ||
1280 | |||
1281 | return 0; | ||
448 | } | 1282 | } |
449 | 1283 | ||
450 | /** | 1284 | /** |
451 | * powernv_eeh_next_error - Retrieve next EEH error to handle | 1285 | * pnv_eeh_next_error - Retrieve next EEH error to handle |
452 | * @pe: Affected PE | 1286 | * @pe: Affected PE |
453 | * | 1287 | * |
454 | * Using OPAL API, to retrieve next EEH error for EEH core to handle | 1288 | * The function is expected to be called by EEH core while it gets |
1289 | * special EEH event (without binding PE). The function calls to | ||
1290 | * OPAL APIs for next error to handle. The informational error is | ||
1291 | * handled internally by platform. However, the dead IOC, dead PHB, | ||
1292 | * fenced PHB and frozen PE should be handled by EEH core eventually. | ||
455 | */ | 1293 | */ |
456 | static int powernv_eeh_next_error(struct eeh_pe **pe) | 1294 | static int pnv_eeh_next_error(struct eeh_pe **pe) |
457 | { | 1295 | { |
458 | struct pci_controller *hose; | 1296 | struct pci_controller *hose; |
459 | struct pnv_phb *phb = NULL; | 1297 | struct pnv_phb *phb; |
1298 | struct eeh_pe *phb_pe, *parent_pe; | ||
1299 | __be64 frozen_pe_no; | ||
1300 | __be16 err_type, severity; | ||
1301 | int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); | ||
1302 | long rc; | ||
1303 | int state, ret = EEH_NEXT_ERR_NONE; | ||
1304 | |||
1305 | /* | ||
1306 | * While running here, it's safe to purge the event queue. | ||
1307 | * And we should keep the cached OPAL notifier event sychronized | ||
1308 | * between the kernel and firmware. | ||
1309 | */ | ||
1310 | eeh_remove_event(NULL, false); | ||
1311 | opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); | ||
460 | 1312 | ||
461 | list_for_each_entry(hose, &hose_list, list_node) { | 1313 | list_for_each_entry(hose, &hose_list, list_node) { |
1314 | /* | ||
1315 | * If the subordinate PCI buses of the PHB has been | ||
1316 | * removed or is exactly under error recovery, we | ||
1317 | * needn't take care of it any more. | ||
1318 | */ | ||
462 | phb = hose->private_data; | 1319 | phb = hose->private_data; |
463 | break; | 1320 | phb_pe = eeh_phb_pe_get(hose); |
464 | } | 1321 | if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED)) |
1322 | continue; | ||
1323 | |||
1324 | rc = opal_pci_next_error(phb->opal_id, | ||
1325 | &frozen_pe_no, &err_type, &severity); | ||
1326 | if (rc != OPAL_SUCCESS) { | ||
1327 | pr_devel("%s: Invalid return value on " | ||
1328 | "PHB#%x (0x%lx) from opal_pci_next_error", | ||
1329 | __func__, hose->global_number, rc); | ||
1330 | continue; | ||
1331 | } | ||
1332 | |||
1333 | /* If the PHB doesn't have error, stop processing */ | ||
1334 | if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR || | ||
1335 | be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) { | ||
1336 | pr_devel("%s: No error found on PHB#%x\n", | ||
1337 | __func__, hose->global_number); | ||
1338 | continue; | ||
1339 | } | ||
1340 | |||
1341 | /* | ||
1342 | * Processing the error. We're expecting the error with | ||
1343 | * highest priority reported upon multiple errors on the | ||
1344 | * specific PHB. | ||
1345 | */ | ||
1346 | pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", | ||
1347 | __func__, be16_to_cpu(err_type), | ||
1348 | be16_to_cpu(severity), be64_to_cpu(frozen_pe_no), | ||
1349 | hose->global_number); | ||
1350 | switch (be16_to_cpu(err_type)) { | ||
1351 | case OPAL_EEH_IOC_ERROR: | ||
1352 | if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) { | ||
1353 | pr_err("EEH: dead IOC detected\n"); | ||
1354 | ret = EEH_NEXT_ERR_DEAD_IOC; | ||
1355 | } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { | ||
1356 | pr_info("EEH: IOC informative error " | ||
1357 | "detected\n"); | ||
1358 | pnv_eeh_get_and_dump_hub_diag(hose); | ||
1359 | ret = EEH_NEXT_ERR_NONE; | ||
1360 | } | ||
1361 | |||
1362 | break; | ||
1363 | case OPAL_EEH_PHB_ERROR: | ||
1364 | if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) { | ||
1365 | *pe = phb_pe; | ||
1366 | pr_err("EEH: dead PHB#%x detected, " | ||
1367 | "location: %s\n", | ||
1368 | hose->global_number, | ||
1369 | eeh_pe_loc_get(phb_pe)); | ||
1370 | ret = EEH_NEXT_ERR_DEAD_PHB; | ||
1371 | } else if (be16_to_cpu(severity) == | ||
1372 | OPAL_EEH_SEV_PHB_FENCED) { | ||
1373 | *pe = phb_pe; | ||
1374 | pr_err("EEH: Fenced PHB#%x detected, " | ||
1375 | "location: %s\n", | ||
1376 | hose->global_number, | ||
1377 | eeh_pe_loc_get(phb_pe)); | ||
1378 | ret = EEH_NEXT_ERR_FENCED_PHB; | ||
1379 | } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { | ||
1380 | pr_info("EEH: PHB#%x informative error " | ||
1381 | "detected, location: %s\n", | ||
1382 | hose->global_number, | ||
1383 | eeh_pe_loc_get(phb_pe)); | ||
1384 | pnv_eeh_get_phb_diag(phb_pe); | ||
1385 | pnv_pci_dump_phb_diag_data(hose, phb_pe->data); | ||
1386 | ret = EEH_NEXT_ERR_NONE; | ||
1387 | } | ||
1388 | |||
1389 | break; | ||
1390 | case OPAL_EEH_PE_ERROR: | ||
1391 | /* | ||
1392 | * If we can't find the corresponding PE, we | ||
1393 | * just try to unfreeze. | ||
1394 | */ | ||
1395 | if (pnv_eeh_get_pe(hose, | ||
1396 | be64_to_cpu(frozen_pe_no), pe)) { | ||
1397 | /* Try best to clear it */ | ||
1398 | pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n", | ||
1399 | hose->global_number, frozen_pe_no); | ||
1400 | pr_info("EEH: PHB location: %s\n", | ||
1401 | eeh_pe_loc_get(phb_pe)); | ||
1402 | opal_pci_eeh_freeze_clear(phb->opal_id, | ||
1403 | frozen_pe_no, | ||
1404 | OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); | ||
1405 | ret = EEH_NEXT_ERR_NONE; | ||
1406 | } else if ((*pe)->state & EEH_PE_ISOLATED || | ||
1407 | eeh_pe_passed(*pe)) { | ||
1408 | ret = EEH_NEXT_ERR_NONE; | ||
1409 | } else { | ||
1410 | pr_err("EEH: Frozen PE#%x " | ||
1411 | "on PHB#%x detected\n", | ||
1412 | (*pe)->addr, | ||
1413 | (*pe)->phb->global_number); | ||
1414 | pr_err("EEH: PE location: %s, " | ||
1415 | "PHB location: %s\n", | ||
1416 | eeh_pe_loc_get(*pe), | ||
1417 | eeh_pe_loc_get(phb_pe)); | ||
1418 | ret = EEH_NEXT_ERR_FROZEN_PE; | ||
1419 | } | ||
1420 | |||
1421 | break; | ||
1422 | default: | ||
1423 | pr_warn("%s: Unexpected error type %d\n", | ||
1424 | __func__, be16_to_cpu(err_type)); | ||
1425 | } | ||
465 | 1426 | ||
466 | if (phb && phb->eeh_ops->next_error) | 1427 | /* |
467 | return phb->eeh_ops->next_error(pe); | 1428 | * EEH core will try recover from fenced PHB or |
1429 | * frozen PE. In the time for frozen PE, EEH core | ||
1430 | * enable IO path for that before collecting logs, | ||
1431 | * but it ruins the site. So we have to dump the | ||
1432 | * log in advance here. | ||
1433 | */ | ||
1434 | if ((ret == EEH_NEXT_ERR_FROZEN_PE || | ||
1435 | ret == EEH_NEXT_ERR_FENCED_PHB) && | ||
1436 | !((*pe)->state & EEH_PE_ISOLATED)) { | ||
1437 | eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); | ||
1438 | pnv_eeh_get_phb_diag(*pe); | ||
1439 | |||
1440 | if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) | ||
1441 | pnv_pci_dump_phb_diag_data((*pe)->phb, | ||
1442 | (*pe)->data); | ||
1443 | } | ||
468 | 1444 | ||
469 | return -EEXIST; | 1445 | /* |
1446 | * We probably have the frozen parent PE out there and | ||
1447 | * we need have to handle frozen parent PE firstly. | ||
1448 | */ | ||
1449 | if (ret == EEH_NEXT_ERR_FROZEN_PE) { | ||
1450 | parent_pe = (*pe)->parent; | ||
1451 | while (parent_pe) { | ||
1452 | /* Hit the ceiling ? */ | ||
1453 | if (parent_pe->type & EEH_PE_PHB) | ||
1454 | break; | ||
1455 | |||
1456 | /* Frozen parent PE ? */ | ||
1457 | state = eeh_ops->get_state(parent_pe, NULL); | ||
1458 | if (state > 0 && | ||
1459 | (state & active_flags) != active_flags) | ||
1460 | *pe = parent_pe; | ||
1461 | |||
1462 | /* Next parent level */ | ||
1463 | parent_pe = parent_pe->parent; | ||
1464 | } | ||
1465 | |||
1466 | /* We possibly migrate to another PE */ | ||
1467 | eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); | ||
1468 | } | ||
1469 | |||
1470 | /* | ||
1471 | * If we have no errors on the specific PHB or only | ||
1472 | * informative error there, we continue poking it. | ||
1473 | * Otherwise, we need actions to be taken by upper | ||
1474 | * layer. | ||
1475 | */ | ||
1476 | if (ret > EEH_NEXT_ERR_INF) | ||
1477 | break; | ||
1478 | } | ||
1479 | |||
1480 | return ret; | ||
470 | } | 1481 | } |
471 | 1482 | ||
472 | static int powernv_eeh_restore_config(struct device_node *dn) | 1483 | static int pnv_eeh_restore_config(struct pci_dn *pdn) |
473 | { | 1484 | { |
474 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | 1485 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); |
475 | struct pnv_phb *phb; | 1486 | struct pnv_phb *phb; |
476 | s64 ret; | 1487 | s64 ret; |
477 | 1488 | ||
@@ -490,24 +1501,23 @@ static int powernv_eeh_restore_config(struct device_node *dn) | |||
490 | return 0; | 1501 | return 0; |
491 | } | 1502 | } |
492 | 1503 | ||
493 | static struct eeh_ops powernv_eeh_ops = { | 1504 | static struct eeh_ops pnv_eeh_ops = { |
494 | .name = "powernv", | 1505 | .name = "powernv", |
495 | .init = powernv_eeh_init, | 1506 | .init = pnv_eeh_init, |
496 | .post_init = powernv_eeh_post_init, | 1507 | .post_init = pnv_eeh_post_init, |
497 | .of_probe = NULL, | 1508 | .probe = pnv_eeh_probe, |
498 | .dev_probe = powernv_eeh_dev_probe, | 1509 | .set_option = pnv_eeh_set_option, |
499 | .set_option = powernv_eeh_set_option, | 1510 | .get_pe_addr = pnv_eeh_get_pe_addr, |
500 | .get_pe_addr = powernv_eeh_get_pe_addr, | 1511 | .get_state = pnv_eeh_get_state, |
501 | .get_state = powernv_eeh_get_state, | 1512 | .reset = pnv_eeh_reset, |
502 | .reset = powernv_eeh_reset, | 1513 | .wait_state = pnv_eeh_wait_state, |
503 | .wait_state = powernv_eeh_wait_state, | 1514 | .get_log = pnv_eeh_get_log, |
504 | .get_log = powernv_eeh_get_log, | 1515 | .configure_bridge = pnv_eeh_configure_bridge, |
505 | .configure_bridge = powernv_eeh_configure_bridge, | 1516 | .err_inject = pnv_eeh_err_inject, |
506 | .err_inject = powernv_eeh_err_inject, | 1517 | .read_config = pnv_eeh_read_config, |
507 | .read_config = powernv_eeh_read_config, | 1518 | .write_config = pnv_eeh_write_config, |
508 | .write_config = powernv_eeh_write_config, | 1519 | .next_error = pnv_eeh_next_error, |
509 | .next_error = powernv_eeh_next_error, | 1520 | .restore_config = pnv_eeh_restore_config |
510 | .restore_config = powernv_eeh_restore_config | ||
511 | }; | 1521 | }; |
512 | 1522 | ||
513 | /** | 1523 | /** |
@@ -521,7 +1531,7 @@ static int __init eeh_powernv_init(void) | |||
521 | int ret = -EINVAL; | 1531 | int ret = -EINVAL; |
522 | 1532 | ||
523 | eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE); | 1533 | eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE); |
524 | ret = eeh_ops_register(&powernv_eeh_ops); | 1534 | ret = eeh_ops_register(&pnv_eeh_ops); |
525 | if (!ret) | 1535 | if (!ret) |
526 | pr_info("EEH: PowerNV platform initialized\n"); | 1536 | pr_info("EEH: PowerNV platform initialized\n"); |
527 | else | 1537 | else |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6c9ff2b95119..76b344125cef 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -1777,7 +1777,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose, | |||
1777 | region.start += phb->ioda.io_segsize; | 1777 | region.start += phb->ioda.io_segsize; |
1778 | index++; | 1778 | index++; |
1779 | } | 1779 | } |
1780 | } else if (res->flags & IORESOURCE_MEM) { | 1780 | } else if ((res->flags & IORESOURCE_MEM) && |
1781 | !pnv_pci_is_mem_pref_64(res->flags)) { | ||
1781 | region.start = res->start - | 1782 | region.start = res->start - |
1782 | hose->mem_offset[0] - | 1783 | hose->mem_offset[0] - |
1783 | phb->ioda.m32_pci_base; | 1784 | phb->ioda.m32_pci_base; |
@@ -2078,9 +2079,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
2078 | phb->get_pe_state = pnv_ioda_get_pe_state; | 2079 | phb->get_pe_state = pnv_ioda_get_pe_state; |
2079 | phb->freeze_pe = pnv_ioda_freeze_pe; | 2080 | phb->freeze_pe = pnv_ioda_freeze_pe; |
2080 | phb->unfreeze_pe = pnv_ioda_unfreeze_pe; | 2081 | phb->unfreeze_pe = pnv_ioda_unfreeze_pe; |
2081 | #ifdef CONFIG_EEH | ||
2082 | phb->eeh_ops = &ioda_eeh_ops; | ||
2083 | #endif | ||
2084 | 2082 | ||
2085 | /* Setup RID -> PE mapping function */ | 2083 | /* Setup RID -> PE mapping function */ |
2086 | phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe; | 2084 | phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe; |
@@ -2121,8 +2119,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
2121 | */ | 2119 | */ |
2122 | if (is_kdump_kernel()) { | 2120 | if (is_kdump_kernel()) { |
2123 | pr_info(" Issue PHB reset ...\n"); | 2121 | pr_info(" Issue PHB reset ...\n"); |
2124 | ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); | 2122 | pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); |
2125 | ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); | 2123 | pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); |
2126 | } | 2124 | } |
2127 | 2125 | ||
2128 | /* Remove M64 resource if we can't configure it successfully */ | 2126 | /* Remove M64 resource if we can't configure it successfully */ |
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 54323d6b5166..946aa3d62c3c 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -366,9 +366,9 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) | |||
366 | spin_unlock_irqrestore(&phb->lock, flags); | 366 | spin_unlock_irqrestore(&phb->lock, flags); |
367 | } | 367 | } |
368 | 368 | ||
369 | static void pnv_pci_config_check_eeh(struct pnv_phb *phb, | 369 | static void pnv_pci_config_check_eeh(struct pci_dn *pdn) |
370 | struct device_node *dn) | ||
371 | { | 370 | { |
371 | struct pnv_phb *phb = pdn->phb->private_data; | ||
372 | u8 fstate; | 372 | u8 fstate; |
373 | __be16 pcierr; | 373 | __be16 pcierr; |
374 | int pe_no; | 374 | int pe_no; |
@@ -379,7 +379,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb, | |||
379 | * setup that yet. So all ER errors should be mapped to | 379 | * setup that yet. So all ER errors should be mapped to |
380 | * reserved PE. | 380 | * reserved PE. |
381 | */ | 381 | */ |
382 | pe_no = PCI_DN(dn)->pe_number; | 382 | pe_no = pdn->pe_number; |
383 | if (pe_no == IODA_INVALID_PE) { | 383 | if (pe_no == IODA_INVALID_PE) { |
384 | if (phb->type == PNV_PHB_P5IOC2) | 384 | if (phb->type == PNV_PHB_P5IOC2) |
385 | pe_no = 0; | 385 | pe_no = 0; |
@@ -407,8 +407,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb, | |||
407 | } | 407 | } |
408 | 408 | ||
409 | cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n", | 409 | cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n", |
410 | (PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn), | 410 | (pdn->busno << 8) | (pdn->devfn), pe_no, fstate); |
411 | pe_no, fstate); | ||
412 | 411 | ||
413 | /* Clear the frozen state if applicable */ | 412 | /* Clear the frozen state if applicable */ |
414 | if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE || | 413 | if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE || |
@@ -425,10 +424,9 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb, | |||
425 | } | 424 | } |
426 | } | 425 | } |
427 | 426 | ||
428 | int pnv_pci_cfg_read(struct device_node *dn, | 427 | int pnv_pci_cfg_read(struct pci_dn *pdn, |
429 | int where, int size, u32 *val) | 428 | int where, int size, u32 *val) |
430 | { | 429 | { |
431 | struct pci_dn *pdn = PCI_DN(dn); | ||
432 | struct pnv_phb *phb = pdn->phb->private_data; | 430 | struct pnv_phb *phb = pdn->phb->private_data; |
433 | u32 bdfn = (pdn->busno << 8) | pdn->devfn; | 431 | u32 bdfn = (pdn->busno << 8) | pdn->devfn; |
434 | s64 rc; | 432 | s64 rc; |
@@ -462,10 +460,9 @@ int pnv_pci_cfg_read(struct device_node *dn, | |||
462 | return PCIBIOS_SUCCESSFUL; | 460 | return PCIBIOS_SUCCESSFUL; |
463 | } | 461 | } |
464 | 462 | ||
465 | int pnv_pci_cfg_write(struct device_node *dn, | 463 | int pnv_pci_cfg_write(struct pci_dn *pdn, |
466 | int where, int size, u32 val) | 464 | int where, int size, u32 val) |
467 | { | 465 | { |
468 | struct pci_dn *pdn = PCI_DN(dn); | ||
469 | struct pnv_phb *phb = pdn->phb->private_data; | 466 | struct pnv_phb *phb = pdn->phb->private_data; |
470 | u32 bdfn = (pdn->busno << 8) | pdn->devfn; | 467 | u32 bdfn = (pdn->busno << 8) | pdn->devfn; |
471 | 468 | ||
@@ -489,18 +486,17 @@ int pnv_pci_cfg_write(struct device_node *dn, | |||
489 | } | 486 | } |
490 | 487 | ||
491 | #if CONFIG_EEH | 488 | #if CONFIG_EEH |
492 | static bool pnv_pci_cfg_check(struct pci_controller *hose, | 489 | static bool pnv_pci_cfg_check(struct pci_dn *pdn) |
493 | struct device_node *dn) | ||
494 | { | 490 | { |
495 | struct eeh_dev *edev = NULL; | 491 | struct eeh_dev *edev = NULL; |
496 | struct pnv_phb *phb = hose->private_data; | 492 | struct pnv_phb *phb = pdn->phb->private_data; |
497 | 493 | ||
498 | /* EEH not enabled ? */ | 494 | /* EEH not enabled ? */ |
499 | if (!(phb->flags & PNV_PHB_FLAG_EEH)) | 495 | if (!(phb->flags & PNV_PHB_FLAG_EEH)) |
500 | return true; | 496 | return true; |
501 | 497 | ||
502 | /* PE reset or device removed ? */ | 498 | /* PE reset or device removed ? */ |
503 | edev = of_node_to_eeh_dev(dn); | 499 | edev = pdn->edev; |
504 | if (edev) { | 500 | if (edev) { |
505 | if (edev->pe && | 501 | if (edev->pe && |
506 | (edev->pe->state & EEH_PE_CFG_BLOCKED)) | 502 | (edev->pe->state & EEH_PE_CFG_BLOCKED)) |
@@ -513,8 +509,7 @@ static bool pnv_pci_cfg_check(struct pci_controller *hose, | |||
513 | return true; | 509 | return true; |
514 | } | 510 | } |
515 | #else | 511 | #else |
516 | static inline pnv_pci_cfg_check(struct pci_controller *hose, | 512 | static inline pnv_pci_cfg_check(struct pci_dn *pdn) |
517 | struct device_node *dn) | ||
518 | { | 513 | { |
519 | return true; | 514 | return true; |
520 | } | 515 | } |
@@ -524,32 +519,26 @@ static int pnv_pci_read_config(struct pci_bus *bus, | |||
524 | unsigned int devfn, | 519 | unsigned int devfn, |
525 | int where, int size, u32 *val) | 520 | int where, int size, u32 *val) |
526 | { | 521 | { |
527 | struct device_node *dn, *busdn = pci_bus_to_OF_node(bus); | ||
528 | struct pci_dn *pdn; | 522 | struct pci_dn *pdn; |
529 | struct pnv_phb *phb; | 523 | struct pnv_phb *phb; |
530 | bool found = false; | ||
531 | int ret; | 524 | int ret; |
532 | 525 | ||
533 | *val = 0xFFFFFFFF; | 526 | *val = 0xFFFFFFFF; |
534 | for (dn = busdn->child; dn; dn = dn->sibling) { | 527 | pdn = pci_get_pdn_by_devfn(bus, devfn); |
535 | pdn = PCI_DN(dn); | 528 | if (!pdn) |
536 | if (pdn && pdn->devfn == devfn) { | 529 | return PCIBIOS_DEVICE_NOT_FOUND; |
537 | phb = pdn->phb->private_data; | ||
538 | found = true; | ||
539 | break; | ||
540 | } | ||
541 | } | ||
542 | 530 | ||
543 | if (!found || !pnv_pci_cfg_check(pdn->phb, dn)) | 531 | if (!pnv_pci_cfg_check(pdn)) |
544 | return PCIBIOS_DEVICE_NOT_FOUND; | 532 | return PCIBIOS_DEVICE_NOT_FOUND; |
545 | 533 | ||
546 | ret = pnv_pci_cfg_read(dn, where, size, val); | 534 | ret = pnv_pci_cfg_read(pdn, where, size, val); |
547 | if (phb->flags & PNV_PHB_FLAG_EEH) { | 535 | phb = pdn->phb->private_data; |
536 | if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) { | ||
548 | if (*val == EEH_IO_ERROR_VALUE(size) && | 537 | if (*val == EEH_IO_ERROR_VALUE(size) && |
549 | eeh_dev_check_failure(of_node_to_eeh_dev(dn))) | 538 | eeh_dev_check_failure(pdn->edev)) |
550 | return PCIBIOS_DEVICE_NOT_FOUND; | 539 | return PCIBIOS_DEVICE_NOT_FOUND; |
551 | } else { | 540 | } else { |
552 | pnv_pci_config_check_eeh(phb, dn); | 541 | pnv_pci_config_check_eeh(pdn); |
553 | } | 542 | } |
554 | 543 | ||
555 | return ret; | 544 | return ret; |
@@ -559,27 +548,21 @@ static int pnv_pci_write_config(struct pci_bus *bus, | |||
559 | unsigned int devfn, | 548 | unsigned int devfn, |
560 | int where, int size, u32 val) | 549 | int where, int size, u32 val) |
561 | { | 550 | { |
562 | struct device_node *dn, *busdn = pci_bus_to_OF_node(bus); | ||
563 | struct pci_dn *pdn; | 551 | struct pci_dn *pdn; |
564 | struct pnv_phb *phb; | 552 | struct pnv_phb *phb; |
565 | bool found = false; | ||
566 | int ret; | 553 | int ret; |
567 | 554 | ||
568 | for (dn = busdn->child; dn; dn = dn->sibling) { | 555 | pdn = pci_get_pdn_by_devfn(bus, devfn); |
569 | pdn = PCI_DN(dn); | 556 | if (!pdn) |
570 | if (pdn && pdn->devfn == devfn) { | 557 | return PCIBIOS_DEVICE_NOT_FOUND; |
571 | phb = pdn->phb->private_data; | ||
572 | found = true; | ||
573 | break; | ||
574 | } | ||
575 | } | ||
576 | 558 | ||
577 | if (!found || !pnv_pci_cfg_check(pdn->phb, dn)) | 559 | if (!pnv_pci_cfg_check(pdn)) |
578 | return PCIBIOS_DEVICE_NOT_FOUND; | 560 | return PCIBIOS_DEVICE_NOT_FOUND; |
579 | 561 | ||
580 | ret = pnv_pci_cfg_write(dn, where, size, val); | 562 | ret = pnv_pci_cfg_write(pdn, where, size, val); |
563 | phb = pdn->phb->private_data; | ||
581 | if (!(phb->flags & PNV_PHB_FLAG_EEH)) | 564 | if (!(phb->flags & PNV_PHB_FLAG_EEH)) |
582 | pnv_pci_config_check_eeh(phb, dn); | 565 | pnv_pci_config_check_eeh(pdn); |
583 | 566 | ||
584 | return ret; | 567 | return ret; |
585 | } | 568 | } |
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 6c02ff8dd69f..1f0cb66133a1 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -75,22 +75,6 @@ struct pnv_ioda_pe { | |||
75 | struct list_head list; | 75 | struct list_head list; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | /* IOC dependent EEH operations */ | ||
79 | #ifdef CONFIG_EEH | ||
80 | struct pnv_eeh_ops { | ||
81 | int (*post_init)(struct pci_controller *hose); | ||
82 | int (*set_option)(struct eeh_pe *pe, int option); | ||
83 | int (*get_state)(struct eeh_pe *pe); | ||
84 | int (*reset)(struct eeh_pe *pe, int option); | ||
85 | int (*get_log)(struct eeh_pe *pe, int severity, | ||
86 | char *drv_log, unsigned long len); | ||
87 | int (*configure_bridge)(struct eeh_pe *pe); | ||
88 | int (*err_inject)(struct eeh_pe *pe, int type, int func, | ||
89 | unsigned long addr, unsigned long mask); | ||
90 | int (*next_error)(struct eeh_pe **pe); | ||
91 | }; | ||
92 | #endif /* CONFIG_EEH */ | ||
93 | |||
94 | #define PNV_PHB_FLAG_EEH (1 << 0) | 78 | #define PNV_PHB_FLAG_EEH (1 << 0) |
95 | 79 | ||
96 | struct pnv_phb { | 80 | struct pnv_phb { |
@@ -104,10 +88,6 @@ struct pnv_phb { | |||
104 | int initialized; | 88 | int initialized; |
105 | spinlock_t lock; | 89 | spinlock_t lock; |
106 | 90 | ||
107 | #ifdef CONFIG_EEH | ||
108 | struct pnv_eeh_ops *eeh_ops; | ||
109 | #endif | ||
110 | |||
111 | #ifdef CONFIG_DEBUG_FS | 91 | #ifdef CONFIG_DEBUG_FS |
112 | int has_dbgfs; | 92 | int has_dbgfs; |
113 | struct dentry *dbgfs; | 93 | struct dentry *dbgfs; |
@@ -213,15 +193,12 @@ struct pnv_phb { | |||
213 | }; | 193 | }; |
214 | 194 | ||
215 | extern struct pci_ops pnv_pci_ops; | 195 | extern struct pci_ops pnv_pci_ops; |
216 | #ifdef CONFIG_EEH | ||
217 | extern struct pnv_eeh_ops ioda_eeh_ops; | ||
218 | #endif | ||
219 | 196 | ||
220 | void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, | 197 | void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, |
221 | unsigned char *log_buff); | 198 | unsigned char *log_buff); |
222 | int pnv_pci_cfg_read(struct device_node *dn, | 199 | int pnv_pci_cfg_read(struct pci_dn *pdn, |
223 | int where, int size, u32 *val); | 200 | int where, int size, u32 *val); |
224 | int pnv_pci_cfg_write(struct device_node *dn, | 201 | int pnv_pci_cfg_write(struct pci_dn *pdn, |
225 | int where, int size, u32 val); | 202 | int where, int size, u32 val); |
226 | extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, | 203 | extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, |
227 | void *tce_mem, u64 tce_size, | 204 | void *tce_mem, u64 tce_size, |
@@ -232,6 +209,6 @@ extern void pnv_pci_init_ioda2_phb(struct device_node *np); | |||
232 | extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, | 209 | extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, |
233 | __be64 *startp, __be64 *endp, bool rm); | 210 | __be64 *startp, __be64 *endp, bool rm); |
234 | extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); | 211 | extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); |
235 | extern int ioda_eeh_phb_reset(struct pci_controller *hose, int option); | 212 | extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); |
236 | 213 | ||
237 | #endif /* __POWERNV_PCI_H */ | 214 | #endif /* __POWERNV_PCI_H */ |
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index a6c7e19f5eb3..2039397cc75d 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c | |||
@@ -118,9 +118,8 @@ static int pseries_eeh_init(void) | |||
118 | return 0; | 118 | return 0; |
119 | } | 119 | } |
120 | 120 | ||
121 | static int pseries_eeh_cap_start(struct device_node *dn) | 121 | static int pseries_eeh_cap_start(struct pci_dn *pdn) |
122 | { | 122 | { |
123 | struct pci_dn *pdn = PCI_DN(dn); | ||
124 | u32 status; | 123 | u32 status; |
125 | 124 | ||
126 | if (!pdn) | 125 | if (!pdn) |
@@ -134,10 +133,9 @@ static int pseries_eeh_cap_start(struct device_node *dn) | |||
134 | } | 133 | } |
135 | 134 | ||
136 | 135 | ||
137 | static int pseries_eeh_find_cap(struct device_node *dn, int cap) | 136 | static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap) |
138 | { | 137 | { |
139 | struct pci_dn *pdn = PCI_DN(dn); | 138 | int pos = pseries_eeh_cap_start(pdn); |
140 | int pos = pseries_eeh_cap_start(dn); | ||
141 | int cnt = 48; /* Maximal number of capabilities */ | 139 | int cnt = 48; /* Maximal number of capabilities */ |
142 | u32 id; | 140 | u32 id; |
143 | 141 | ||
@@ -160,10 +158,9 @@ static int pseries_eeh_find_cap(struct device_node *dn, int cap) | |||
160 | return 0; | 158 | return 0; |
161 | } | 159 | } |
162 | 160 | ||
163 | static int pseries_eeh_find_ecap(struct device_node *dn, int cap) | 161 | static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap) |
164 | { | 162 | { |
165 | struct pci_dn *pdn = PCI_DN(dn); | 163 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); |
166 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | ||
167 | u32 header; | 164 | u32 header; |
168 | int pos = 256; | 165 | int pos = 256; |
169 | int ttl = (4096 - 256) / 8; | 166 | int ttl = (4096 - 256) / 8; |
@@ -191,53 +188,44 @@ static int pseries_eeh_find_ecap(struct device_node *dn, int cap) | |||
191 | } | 188 | } |
192 | 189 | ||
193 | /** | 190 | /** |
194 | * pseries_eeh_of_probe - EEH probe on the given device | 191 | * pseries_eeh_probe - EEH probe on the given device |
195 | * @dn: OF node | 192 | * @pdn: PCI device node |
196 | * @flag: Unused | 193 | * @data: Unused |
197 | * | 194 | * |
198 | * When EEH module is installed during system boot, all PCI devices | 195 | * When EEH module is installed during system boot, all PCI devices |
199 | * are checked one by one to see if it supports EEH. The function | 196 | * are checked one by one to see if it supports EEH. The function |
200 | * is introduced for the purpose. | 197 | * is introduced for the purpose. |
201 | */ | 198 | */ |
202 | static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) | 199 | static void *pseries_eeh_probe(struct pci_dn *pdn, void *data) |
203 | { | 200 | { |
204 | struct eeh_dev *edev; | 201 | struct eeh_dev *edev; |
205 | struct eeh_pe pe; | 202 | struct eeh_pe pe; |
206 | struct pci_dn *pdn = PCI_DN(dn); | ||
207 | const __be32 *classp, *vendorp, *devicep; | ||
208 | u32 class_code; | ||
209 | const __be32 *regs; | ||
210 | u32 pcie_flags; | 203 | u32 pcie_flags; |
211 | int enable = 0; | 204 | int enable = 0; |
212 | int ret; | 205 | int ret; |
213 | 206 | ||
214 | /* Retrieve OF node and eeh device */ | 207 | /* Retrieve OF node and eeh device */ |
215 | edev = of_node_to_eeh_dev(dn); | 208 | edev = pdn_to_eeh_dev(pdn); |
216 | if (edev->pe || !of_device_is_available(dn)) | 209 | if (!edev || edev->pe) |
217 | return NULL; | 210 | return NULL; |
218 | 211 | ||
219 | /* Retrieve class/vendor/device IDs */ | 212 | /* Check class/vendor/device IDs */ |
220 | classp = of_get_property(dn, "class-code", NULL); | 213 | if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code) |
221 | vendorp = of_get_property(dn, "vendor-id", NULL); | ||
222 | devicep = of_get_property(dn, "device-id", NULL); | ||
223 | |||
224 | /* Skip for bad OF node or PCI-ISA bridge */ | ||
225 | if (!classp || !vendorp || !devicep) | ||
226 | return NULL; | ||
227 | if (dn->type && !strcmp(dn->type, "isa")) | ||
228 | return NULL; | 214 | return NULL; |
229 | 215 | ||
230 | class_code = of_read_number(classp, 1); | 216 | /* Skip for PCI-ISA bridge */ |
217 | if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA) | ||
218 | return NULL; | ||
231 | 219 | ||
232 | /* | 220 | /* |
233 | * Update class code and mode of eeh device. We need | 221 | * Update class code and mode of eeh device. We need |
234 | * correctly reflects that current device is root port | 222 | * correctly reflects that current device is root port |
235 | * or PCIe switch downstream port. | 223 | * or PCIe switch downstream port. |
236 | */ | 224 | */ |
237 | edev->class_code = class_code; | 225 | edev->class_code = pdn->class_code; |
238 | edev->pcix_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_PCIX); | 226 | edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX); |
239 | edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP); | 227 | edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP); |
240 | edev->aer_cap = pseries_eeh_find_ecap(dn, PCI_EXT_CAP_ID_ERR); | 228 | edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR); |
241 | edev->mode &= 0xFFFFFF00; | 229 | edev->mode &= 0xFFFFFF00; |
242 | if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { | 230 | if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { |
243 | edev->mode |= EEH_DEV_BRIDGE; | 231 | edev->mode |= EEH_DEV_BRIDGE; |
@@ -252,24 +240,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) | |||
252 | } | 240 | } |
253 | } | 241 | } |
254 | 242 | ||
255 | /* Retrieve the device address */ | ||
256 | regs = of_get_property(dn, "reg", NULL); | ||
257 | if (!regs) { | ||
258 | pr_warn("%s: OF node property %s::reg not found\n", | ||
259 | __func__, dn->full_name); | ||
260 | return NULL; | ||
261 | } | ||
262 | |||
263 | /* Initialize the fake PE */ | 243 | /* Initialize the fake PE */ |
264 | memset(&pe, 0, sizeof(struct eeh_pe)); | 244 | memset(&pe, 0, sizeof(struct eeh_pe)); |
265 | pe.phb = edev->phb; | 245 | pe.phb = edev->phb; |
266 | pe.config_addr = of_read_number(regs, 1); | 246 | pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8); |
267 | 247 | ||
268 | /* Enable EEH on the device */ | 248 | /* Enable EEH on the device */ |
269 | ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); | 249 | ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); |
270 | if (!ret) { | 250 | if (!ret) { |
271 | edev->config_addr = of_read_number(regs, 1); | ||
272 | /* Retrieve PE address */ | 251 | /* Retrieve PE address */ |
252 | edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8); | ||
273 | edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); | 253 | edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); |
274 | pe.addr = edev->pe_config_addr; | 254 | pe.addr = edev->pe_config_addr; |
275 | 255 | ||
@@ -285,16 +265,17 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) | |||
285 | eeh_add_flag(EEH_ENABLED); | 265 | eeh_add_flag(EEH_ENABLED); |
286 | eeh_add_to_parent_pe(edev); | 266 | eeh_add_to_parent_pe(edev); |
287 | 267 | ||
288 | pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", | 268 | pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n", |
289 | __func__, dn->full_name, pe.phb->global_number, | 269 | __func__, pdn->busno, PCI_SLOT(pdn->devfn), |
290 | pe.addr, pe.config_addr); | 270 | PCI_FUNC(pdn->devfn), pe.phb->global_number, |
291 | } else if (dn->parent && of_node_to_eeh_dev(dn->parent) && | 271 | pe.addr); |
292 | (of_node_to_eeh_dev(dn->parent))->pe) { | 272 | } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) && |
273 | (pdn_to_eeh_dev(pdn->parent))->pe) { | ||
293 | /* This device doesn't support EEH, but it may have an | 274 | /* This device doesn't support EEH, but it may have an |
294 | * EEH parent, in which case we mark it as supported. | 275 | * EEH parent, in which case we mark it as supported. |
295 | */ | 276 | */ |
296 | edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr; | 277 | edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr; |
297 | edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr; | 278 | edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr; |
298 | eeh_add_to_parent_pe(edev); | 279 | eeh_add_to_parent_pe(edev); |
299 | } | 280 | } |
300 | } | 281 | } |
@@ -670,45 +651,36 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) | |||
670 | 651 | ||
671 | /** | 652 | /** |
672 | * pseries_eeh_read_config - Read PCI config space | 653 | * pseries_eeh_read_config - Read PCI config space |
673 | * @dn: device node | 654 | * @pdn: PCI device node |
674 | * @where: PCI address | 655 | * @where: PCI address |
675 | * @size: size to read | 656 | * @size: size to read |
676 | * @val: return value | 657 | * @val: return value |
677 | * | 658 | * |
678 | * Read config space from the specified device | 659 | * Read config space from the specified device |
679 | */ | 660 | */ |
680 | static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val) | 661 | static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val) |
681 | { | 662 | { |
682 | struct pci_dn *pdn; | ||
683 | |||
684 | pdn = PCI_DN(dn); | ||
685 | |||
686 | return rtas_read_config(pdn, where, size, val); | 663 | return rtas_read_config(pdn, where, size, val); |
687 | } | 664 | } |
688 | 665 | ||
689 | /** | 666 | /** |
690 | * pseries_eeh_write_config - Write PCI config space | 667 | * pseries_eeh_write_config - Write PCI config space |
691 | * @dn: device node | 668 | * @pdn: PCI device node |
692 | * @where: PCI address | 669 | * @where: PCI address |
693 | * @size: size to write | 670 | * @size: size to write |
694 | * @val: value to be written | 671 | * @val: value to be written |
695 | * | 672 | * |
696 | * Write config space to the specified device | 673 | * Write config space to the specified device |
697 | */ | 674 | */ |
698 | static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val) | 675 | static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val) |
699 | { | 676 | { |
700 | struct pci_dn *pdn; | ||
701 | |||
702 | pdn = PCI_DN(dn); | ||
703 | |||
704 | return rtas_write_config(pdn, where, size, val); | 677 | return rtas_write_config(pdn, where, size, val); |
705 | } | 678 | } |
706 | 679 | ||
707 | static struct eeh_ops pseries_eeh_ops = { | 680 | static struct eeh_ops pseries_eeh_ops = { |
708 | .name = "pseries", | 681 | .name = "pseries", |
709 | .init = pseries_eeh_init, | 682 | .init = pseries_eeh_init, |
710 | .of_probe = pseries_eeh_of_probe, | 683 | .probe = pseries_eeh_probe, |
711 | .dev_probe = NULL, | ||
712 | .set_option = pseries_eeh_set_option, | 684 | .set_option = pseries_eeh_set_option, |
713 | .get_pe_addr = pseries_eeh_get_pe_addr, | 685 | .get_pe_addr = pseries_eeh_get_pe_addr, |
714 | .get_state = pseries_eeh_get_state, | 686 | .get_state = pseries_eeh_get_state, |
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 691a154c286d..c8d24f9a6948 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -195,6 +195,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | |||
195 | static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) | 195 | static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) |
196 | { | 196 | { |
197 | struct device_node *dn; | 197 | struct device_node *dn; |
198 | struct pci_dn *pdn; | ||
198 | struct eeh_dev *edev; | 199 | struct eeh_dev *edev; |
199 | 200 | ||
200 | /* Found our PE and assume 8 at that point. */ | 201 | /* Found our PE and assume 8 at that point. */ |
@@ -204,10 +205,11 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) | |||
204 | return NULL; | 205 | return NULL; |
205 | 206 | ||
206 | /* Get the top level device in the PE */ | 207 | /* Get the top level device in the PE */ |
207 | edev = of_node_to_eeh_dev(dn); | 208 | edev = pdn_to_eeh_dev(PCI_DN(dn)); |
208 | if (edev->pe) | 209 | if (edev->pe) |
209 | edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); | 210 | edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); |
210 | dn = eeh_dev_to_of_node(edev); | 211 | pdn = eeh_dev_to_pdn(edev); |
212 | dn = pdn ? pdn->node : NULL; | ||
211 | if (!dn) | 213 | if (!dn) |
212 | return NULL; | 214 | return NULL; |
213 | 215 | ||
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 89e23811199c..f735f4fee48c 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c | |||
@@ -82,7 +82,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn) | |||
82 | eeh_dev_phb_init_dynamic(phb); | 82 | eeh_dev_phb_init_dynamic(phb); |
83 | 83 | ||
84 | if (dn->child) | 84 | if (dn->child) |
85 | eeh_add_device_tree_early(dn); | 85 | eeh_add_device_tree_early(PCI_DN(dn)); |
86 | 86 | ||
87 | pcibios_scan_phb(phb); | 87 | pcibios_scan_phb(phb); |
88 | pcibios_finish_adding_to_bus(phb->bus); | 88 | pcibios_finish_adding_to_bus(phb->bus); |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index e445b6701f50..70304070a260 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -265,7 +265,7 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act | |||
265 | update_dn_pci_info(np, pci->phb); | 265 | update_dn_pci_info(np, pci->phb); |
266 | 266 | ||
267 | /* Create EEH device for the OF node */ | 267 | /* Create EEH device for the OF node */ |
268 | eeh_dev_init(np, pci->phb); | 268 | eeh_dev_init(PCI_DN(np), pci->phb); |
269 | } | 269 | } |
270 | break; | 270 | break; |
271 | default: | 271 | default: |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 238482495e81..77d6453f6a3e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -2523,9 +2523,7 @@ int efx_try_recovery(struct efx_nic *efx) | |||
2523 | * schedule a 'recover or reset', leading to this recovery handler. | 2523 | * schedule a 'recover or reset', leading to this recovery handler. |
2524 | * Manually call the eeh failure check function. | 2524 | * Manually call the eeh failure check function. |
2525 | */ | 2525 | */ |
2526 | struct eeh_dev *eehdev = | 2526 | struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); |
2527 | of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); | ||
2528 | |||
2529 | if (eeh_dev_check_failure(eehdev)) { | 2527 | if (eeh_dev_check_failure(eehdev)) { |
2530 | /* The EEH mechanisms will handle the error and reset the | 2528 | /* The EEH mechanisms will handle the error and reset the |
2531 | * device if necessary. | 2529 | * device if necessary. |
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 3583f0208a6e..f12c811938d2 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c | |||
@@ -205,8 +205,7 @@ static int siena_map_reset_flags(u32 *flags) | |||
205 | */ | 205 | */ |
206 | static void siena_monitor(struct efx_nic *efx) | 206 | static void siena_monitor(struct efx_nic *efx) |
207 | { | 207 | { |
208 | struct eeh_dev *eehdev = | 208 | struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); |
209 | of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); | ||
210 | 209 | ||
211 | eeh_dev_check_failure(eehdev); | 210 | eeh_dev_check_failure(eehdev); |
212 | } | 211 | } |
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 7660232ef460..e12bafdc42e0 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c | |||
@@ -146,7 +146,7 @@ static void dlpar_pci_add_bus(struct device_node *dn) | |||
146 | struct pci_controller *phb = pdn->phb; | 146 | struct pci_controller *phb = pdn->phb; |
147 | struct pci_dev *dev = NULL; | 147 | struct pci_dev *dev = NULL; |
148 | 148 | ||
149 | eeh_add_device_tree_early(dn); | 149 | eeh_add_device_tree_early(pdn); |
150 | 150 | ||
151 | /* Add EADS device to PHB bus, adding new entry to bus->devices */ | 151 | /* Add EADS device to PHB bus, adding new entry to bus->devices */ |
152 | dev = of_create_pci_dev(dn, phb->bus, pdn->devfn); | 152 | dev = of_create_pci_dev(dn, phb->bus, pdn->devfn); |