diff options
Diffstat (limited to 'arch')
78 files changed, 1877 insertions, 1046 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 352f416269ce..98e513b62709 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -239,6 +239,9 @@ config PPC_OF_PLATFORM_PCI | |||
239 | config ARCH_SUPPORTS_DEBUG_PAGEALLOC | 239 | config ARCH_SUPPORTS_DEBUG_PAGEALLOC |
240 | def_bool y | 240 | def_bool y |
241 | 241 | ||
242 | config ARCH_SUPPORTS_UPROBES | ||
243 | def_bool y | ||
244 | |||
242 | config PPC_ADV_DEBUG_REGS | 245 | config PPC_ADV_DEBUG_REGS |
243 | bool | 246 | bool |
244 | depends on 40x || BOOKE | 247 | depends on 40x || BOOKE |
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index b7d833382be4..6a15c968d214 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile | |||
@@ -107,6 +107,7 @@ src-boot := $(addprefix $(obj)/, $(src-boot)) | |||
107 | obj-boot := $(addsuffix .o, $(basename $(src-boot))) | 107 | obj-boot := $(addsuffix .o, $(basename $(src-boot))) |
108 | obj-wlib := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-wlib)))) | 108 | obj-wlib := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-wlib)))) |
109 | obj-plat := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-plat)))) | 109 | obj-plat := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-plat)))) |
110 | obj-plat: $(libfdt) | ||
110 | 111 | ||
111 | quiet_cmd_copy_zlib = COPY $@ | 112 | quiet_cmd_copy_zlib = COPY $@ |
112 | cmd_copy_zlib = sed "s@__used@@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@ | 113 | cmd_copy_zlib = sed "s@__used@@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@ |
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h deleted file mode 100644 index 9d92ba04b033..000000000000 --- a/arch/powerpc/include/asm/abs_addr.h +++ /dev/null | |||
@@ -1,56 +0,0 @@ | |||
1 | #ifndef _ASM_POWERPC_ABS_ADDR_H | ||
2 | #define _ASM_POWERPC_ABS_ADDR_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | |||
6 | /* | ||
7 | * c 2001 PPC 64 Team, IBM Corp | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/memblock.h> | ||
16 | |||
17 | #include <asm/types.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/prom.h> | ||
20 | |||
21 | struct mschunks_map { | ||
22 | unsigned long num_chunks; | ||
23 | unsigned long chunk_size; | ||
24 | unsigned long chunk_shift; | ||
25 | unsigned long chunk_mask; | ||
26 | u32 *mapping; | ||
27 | }; | ||
28 | |||
29 | extern struct mschunks_map mschunks_map; | ||
30 | |||
31 | /* Chunks are 256 KB */ | ||
32 | #define MSCHUNKS_CHUNK_SHIFT (18) | ||
33 | #define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT) | ||
34 | #define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1) | ||
35 | |||
36 | static inline unsigned long chunk_to_addr(unsigned long chunk) | ||
37 | { | ||
38 | return chunk << MSCHUNKS_CHUNK_SHIFT; | ||
39 | } | ||
40 | |||
41 | static inline unsigned long addr_to_chunk(unsigned long addr) | ||
42 | { | ||
43 | return addr >> MSCHUNKS_CHUNK_SHIFT; | ||
44 | } | ||
45 | |||
46 | static inline unsigned long phys_to_abs(unsigned long pa) | ||
47 | { | ||
48 | return pa; | ||
49 | } | ||
50 | |||
51 | /* Convenience macros */ | ||
52 | #define virt_to_abs(va) phys_to_abs(__pa(va)) | ||
53 | #define abs_to_virt(aa) __va(aa) | ||
54 | |||
55 | #endif /* __KERNEL__ */ | ||
56 | #endif /* _ASM_POWERPC_ABS_ADDR_H */ | ||
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h index 716d2f089eb6..32de2577bb6d 100644 --- a/arch/powerpc/include/asm/debug.h +++ b/arch/powerpc/include/asm/debug.h | |||
@@ -44,7 +44,7 @@ static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } | |||
44 | static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } | 44 | static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | extern int set_dabr(unsigned long dabr); | 47 | extern int set_dabr(unsigned long dabr, unsigned long dabrx); |
48 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 48 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
49 | extern void do_send_trap(struct pt_regs *regs, unsigned long address, | 49 | extern void do_send_trap(struct pt_regs *regs, unsigned long address, |
50 | unsigned long error_code, int signal_code, int brkpt); | 50 | unsigned long error_code, int signal_code, int brkpt); |
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index d60f99814ffb..58c5ee61e700 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h | |||
@@ -32,27 +32,61 @@ struct device_node; | |||
32 | #ifdef CONFIG_EEH | 32 | #ifdef CONFIG_EEH |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * The struct is used to trace PE related EEH functionality. | ||
36 | * In theory, there will have one instance of the struct to | ||
37 | * be created against particular PE. In nature, PEs corelate | ||
38 | * to each other. the struct has to reflect that hierarchy in | ||
39 | * order to easily pick up those affected PEs when one particular | ||
40 | * PE has EEH errors. | ||
41 | * | ||
42 | * Also, one particular PE might be composed of PCI device, PCI | ||
43 | * bus and its subordinate components. The struct also need ship | ||
44 | * the information. Further more, one particular PE is only meaingful | ||
45 | * in the corresponding PHB. Therefore, the root PEs should be created | ||
46 | * against existing PHBs in on-to-one fashion. | ||
47 | */ | ||
48 | #define EEH_PE_PHB 1 /* PHB PE */ | ||
49 | #define EEH_PE_DEVICE 2 /* Device PE */ | ||
50 | #define EEH_PE_BUS 3 /* Bus PE */ | ||
51 | |||
52 | #define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */ | ||
53 | #define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ | ||
54 | |||
55 | struct eeh_pe { | ||
56 | int type; /* PE type: PHB/Bus/Device */ | ||
57 | int state; /* PE EEH dependent mode */ | ||
58 | int config_addr; /* Traditional PCI address */ | ||
59 | int addr; /* PE configuration address */ | ||
60 | struct pci_controller *phb; /* Associated PHB */ | ||
61 | int check_count; /* Times of ignored error */ | ||
62 | int freeze_count; /* Times of froze up */ | ||
63 | int false_positives; /* Times of reported #ff's */ | ||
64 | struct eeh_pe *parent; /* Parent PE */ | ||
65 | struct list_head child_list; /* Link PE to the child list */ | ||
66 | struct list_head edevs; /* Link list of EEH devices */ | ||
67 | struct list_head child; /* Child PEs */ | ||
68 | }; | ||
69 | |||
70 | #define eeh_pe_for_each_dev(pe, edev) \ | ||
71 | list_for_each_entry(edev, &pe->edevs, list) | ||
72 | |||
73 | /* | ||
35 | * The struct is used to trace EEH state for the associated | 74 | * The struct is used to trace EEH state for the associated |
36 | * PCI device node or PCI device. In future, it might | 75 | * PCI device node or PCI device. In future, it might |
37 | * represent PE as well so that the EEH device to form | 76 | * represent PE as well so that the EEH device to form |
38 | * another tree except the currently existing tree of PCI | 77 | * another tree except the currently existing tree of PCI |
39 | * buses and PCI devices | 78 | * buses and PCI devices |
40 | */ | 79 | */ |
41 | #define EEH_MODE_SUPPORTED (1<<0) /* EEH supported on the device */ | 80 | #define EEH_DEV_IRQ_DISABLED (1<<0) /* Interrupt disabled */ |
42 | #define EEH_MODE_NOCHECK (1<<1) /* EEH check should be skipped */ | ||
43 | #define EEH_MODE_ISOLATED (1<<2) /* The device has been isolated */ | ||
44 | #define EEH_MODE_RECOVERING (1<<3) /* Recovering the device */ | ||
45 | #define EEH_MODE_IRQ_DISABLED (1<<4) /* Interrupt disabled */ | ||
46 | 81 | ||
47 | struct eeh_dev { | 82 | struct eeh_dev { |
48 | int mode; /* EEH mode */ | 83 | int mode; /* EEH mode */ |
49 | int class_code; /* Class code of the device */ | 84 | int class_code; /* Class code of the device */ |
50 | int config_addr; /* Config address */ | 85 | int config_addr; /* Config address */ |
51 | int pe_config_addr; /* PE config address */ | 86 | int pe_config_addr; /* PE config address */ |
52 | int check_count; /* Times of ignored error */ | ||
53 | int freeze_count; /* Times of froze up */ | ||
54 | int false_positives; /* Times of reported #ff's */ | ||
55 | u32 config_space[16]; /* Saved PCI config space */ | 87 | u32 config_space[16]; /* Saved PCI config space */ |
88 | struct eeh_pe *pe; /* Associated PE */ | ||
89 | struct list_head list; /* Form link list in the PE */ | ||
56 | struct pci_controller *phb; /* Associated PHB */ | 90 | struct pci_controller *phb; /* Associated PHB */ |
57 | struct device_node *dn; /* Associated device node */ | 91 | struct device_node *dn; /* Associated device node */ |
58 | struct pci_dev *pdev; /* Associated PCI device */ | 92 | struct pci_dev *pdev; /* Associated PCI device */ |
@@ -95,19 +129,51 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) | |||
95 | struct eeh_ops { | 129 | struct eeh_ops { |
96 | char *name; | 130 | char *name; |
97 | int (*init)(void); | 131 | int (*init)(void); |
98 | int (*set_option)(struct device_node *dn, int option); | 132 | void* (*of_probe)(struct device_node *dn, void *flag); |
99 | int (*get_pe_addr)(struct device_node *dn); | 133 | void* (*dev_probe)(struct pci_dev *dev, void *flag); |
100 | int (*get_state)(struct device_node *dn, int *state); | 134 | int (*set_option)(struct eeh_pe *pe, int option); |
101 | int (*reset)(struct device_node *dn, int option); | 135 | int (*get_pe_addr)(struct eeh_pe *pe); |
102 | int (*wait_state)(struct device_node *dn, int max_wait); | 136 | int (*get_state)(struct eeh_pe *pe, int *state); |
103 | int (*get_log)(struct device_node *dn, int severity, char *drv_log, unsigned long len); | 137 | int (*reset)(struct eeh_pe *pe, int option); |
104 | int (*configure_bridge)(struct device_node *dn); | 138 | int (*wait_state)(struct eeh_pe *pe, int max_wait); |
139 | int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len); | ||
140 | int (*configure_bridge)(struct eeh_pe *pe); | ||
105 | int (*read_config)(struct device_node *dn, int where, int size, u32 *val); | 141 | int (*read_config)(struct device_node *dn, int where, int size, u32 *val); |
106 | int (*write_config)(struct device_node *dn, int where, int size, u32 val); | 142 | int (*write_config)(struct device_node *dn, int where, int size, u32 val); |
107 | }; | 143 | }; |
108 | 144 | ||
109 | extern struct eeh_ops *eeh_ops; | 145 | extern struct eeh_ops *eeh_ops; |
110 | extern int eeh_subsystem_enabled; | 146 | extern int eeh_subsystem_enabled; |
147 | extern struct mutex eeh_mutex; | ||
148 | extern int eeh_probe_mode; | ||
149 | |||
150 | #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ | ||
151 | #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ | ||
152 | |||
153 | static inline void eeh_probe_mode_set(int flag) | ||
154 | { | ||
155 | eeh_probe_mode = flag; | ||
156 | } | ||
157 | |||
158 | static inline int eeh_probe_mode_devtree(void) | ||
159 | { | ||
160 | return (eeh_probe_mode == EEH_PROBE_MODE_DEVTREE); | ||
161 | } | ||
162 | |||
163 | static inline int eeh_probe_mode_dev(void) | ||
164 | { | ||
165 | return (eeh_probe_mode == EEH_PROBE_MODE_DEV); | ||
166 | } | ||
167 | |||
168 | static inline void eeh_lock(void) | ||
169 | { | ||
170 | mutex_lock(&eeh_mutex); | ||
171 | } | ||
172 | |||
173 | static inline void eeh_unlock(void) | ||
174 | { | ||
175 | mutex_unlock(&eeh_mutex); | ||
176 | } | ||
111 | 177 | ||
112 | /* | 178 | /* |
113 | * Max number of EEH freezes allowed before we consider the device | 179 | * Max number of EEH freezes allowed before we consider the device |
@@ -115,19 +181,23 @@ extern int eeh_subsystem_enabled; | |||
115 | */ | 181 | */ |
116 | #define EEH_MAX_ALLOWED_FREEZES 5 | 182 | #define EEH_MAX_ALLOWED_FREEZES 5 |
117 | 183 | ||
184 | typedef void *(*eeh_traverse_func)(void *data, void *flag); | ||
185 | int __devinit eeh_phb_pe_create(struct pci_controller *phb); | ||
186 | int eeh_add_to_parent_pe(struct eeh_dev *edev); | ||
187 | int eeh_rmv_from_parent_pe(struct eeh_dev *edev); | ||
188 | void *eeh_pe_dev_traverse(struct eeh_pe *root, | ||
189 | eeh_traverse_func fn, void *flag); | ||
190 | void eeh_pe_restore_bars(struct eeh_pe *pe); | ||
191 | struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); | ||
192 | |||
118 | void * __devinit eeh_dev_init(struct device_node *dn, void *data); | 193 | void * __devinit eeh_dev_init(struct device_node *dn, void *data); |
119 | void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb); | 194 | void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb); |
120 | void __init eeh_dev_phb_init(void); | ||
121 | void __init eeh_init(void); | ||
122 | #ifdef CONFIG_PPC_PSERIES | ||
123 | int __init eeh_pseries_init(void); | ||
124 | #endif | ||
125 | int __init eeh_ops_register(struct eeh_ops *ops); | 195 | int __init eeh_ops_register(struct eeh_ops *ops); |
126 | int __exit eeh_ops_unregister(const char *name); | 196 | int __exit eeh_ops_unregister(const char *name); |
127 | unsigned long eeh_check_failure(const volatile void __iomem *token, | 197 | unsigned long eeh_check_failure(const volatile void __iomem *token, |
128 | unsigned long val); | 198 | unsigned long val); |
129 | int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev); | 199 | int eeh_dev_check_failure(struct eeh_dev *edev); |
130 | void __init pci_addr_cache_build(void); | 200 | void __init eeh_addr_cache_build(void); |
131 | void eeh_add_device_tree_early(struct device_node *); | 201 | void eeh_add_device_tree_early(struct device_node *); |
132 | void eeh_add_device_tree_late(struct pci_bus *); | 202 | void eeh_add_device_tree_late(struct pci_bus *); |
133 | void eeh_remove_bus_device(struct pci_dev *); | 203 | void eeh_remove_bus_device(struct pci_dev *); |
@@ -156,34 +226,24 @@ static inline void *eeh_dev_init(struct device_node *dn, void *data) | |||
156 | 226 | ||
157 | static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { } | 227 | static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { } |
158 | 228 | ||
159 | static inline void eeh_dev_phb_init(void) { } | ||
160 | |||
161 | static inline void eeh_init(void) { } | ||
162 | |||
163 | #ifdef CONFIG_PPC_PSERIES | ||
164 | static inline int eeh_pseries_init(void) | ||
165 | { | ||
166 | return 0; | ||
167 | } | ||
168 | #endif /* CONFIG_PPC_PSERIES */ | ||
169 | |||
170 | static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) | 229 | static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) |
171 | { | 230 | { |
172 | return val; | 231 | return val; |
173 | } | 232 | } |
174 | 233 | ||
175 | static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | 234 | #define eeh_dev_check_failure(x) (0) |
176 | { | ||
177 | return 0; | ||
178 | } | ||
179 | 235 | ||
180 | static inline void pci_addr_cache_build(void) { } | 236 | static inline void eeh_addr_cache_build(void) { } |
181 | 237 | ||
182 | static inline void eeh_add_device_tree_early(struct device_node *dn) { } | 238 | static inline void eeh_add_device_tree_early(struct device_node *dn) { } |
183 | 239 | ||
184 | static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } | 240 | static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } |
185 | 241 | ||
186 | static inline void eeh_remove_bus_device(struct pci_dev *dev) { } | 242 | static inline void eeh_remove_bus_device(struct pci_dev *dev) { } |
243 | |||
244 | static inline void eeh_lock(void) { } | ||
245 | static inline void eeh_unlock(void) { } | ||
246 | |||
187 | #define EEH_POSSIBLE_ERROR(val, type) (0) | 247 | #define EEH_POSSIBLE_ERROR(val, type) (0) |
188 | #define EEH_IO_ERROR_VALUE(size) (-1UL) | 248 | #define EEH_IO_ERROR_VALUE(size) (-1UL) |
189 | #endif /* CONFIG_EEH */ | 249 | #endif /* CONFIG_EEH */ |
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h index c68b012b7797..de67d830151b 100644 --- a/arch/powerpc/include/asm/eeh_event.h +++ b/arch/powerpc/include/asm/eeh_event.h | |||
@@ -28,11 +28,11 @@ | |||
28 | */ | 28 | */ |
29 | struct eeh_event { | 29 | struct eeh_event { |
30 | struct list_head list; /* to form event queue */ | 30 | struct list_head list; /* to form event queue */ |
31 | struct eeh_dev *edev; /* EEH device */ | 31 | struct eeh_pe *pe; /* EEH PE */ |
32 | }; | 32 | }; |
33 | 33 | ||
34 | int eeh_send_failure_event(struct eeh_dev *edev); | 34 | int eeh_send_failure_event(struct eeh_pe *pe); |
35 | struct eeh_dev *handle_eeh_events(struct eeh_event *); | 35 | void eeh_handle_event(struct eeh_pe *pe); |
36 | 36 | ||
37 | #endif /* __KERNEL__ */ | 37 | #endif /* __KERNEL__ */ |
38 | #endif /* ASM_POWERPC_EEH_EVENT_H */ | 38 | #endif /* ASM_POWERPC_EEH_EVENT_H */ |
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index ac13addb8495..51fa43e536b9 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h | |||
@@ -37,6 +37,7 @@ | |||
37 | * critical data | 37 | * critical data |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #define PACA_EXGDBELL PACA_EXGEN | ||
40 | 41 | ||
41 | /* We are out of SPRGs so we save some things in the PACA. The normal | 42 | /* We are out of SPRGs so we save some things in the PACA. The normal |
42 | * exception frame is smaller than the CRIT or MC one though | 43 | * exception frame is smaller than the CRIT or MC one though |
@@ -45,8 +46,9 @@ | |||
45 | #define EX_CR (1 * 8) | 46 | #define EX_CR (1 * 8) |
46 | #define EX_R10 (2 * 8) | 47 | #define EX_R10 (2 * 8) |
47 | #define EX_R11 (3 * 8) | 48 | #define EX_R11 (3 * 8) |
48 | #define EX_R14 (4 * 8) | 49 | #define EX_R13 (4 * 8) |
49 | #define EX_R15 (5 * 8) | 50 | #define EX_R14 (5 * 8) |
51 | #define EX_R15 (6 * 8) | ||
50 | 52 | ||
51 | /* | 53 | /* |
52 | * The TLB miss exception uses different slots. | 54 | * The TLB miss exception uses different slots. |
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 423cf9eaf4a4..7a867065db79 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h | |||
@@ -152,11 +152,6 @@ | |||
152 | #define H_VASI_RESUMED 5 | 152 | #define H_VASI_RESUMED 5 |
153 | #define H_VASI_COMPLETED 6 | 153 | #define H_VASI_COMPLETED 6 |
154 | 154 | ||
155 | /* DABRX flags */ | ||
156 | #define H_DABRX_HYPERVISOR (1UL<<(63-61)) | ||
157 | #define H_DABRX_KERNEL (1UL<<(63-62)) | ||
158 | #define H_DABRX_USER (1UL<<(63-63)) | ||
159 | |||
160 | /* Each control block has to be on a 4K boundary */ | 155 | /* Each control block has to be on a 4K boundary */ |
161 | #define H_CB_ALIGNMENT 4096 | 156 | #define H_CB_ALIGNMENT 4096 |
162 | 157 | ||
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index be04330af751..423424599dad 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h | |||
@@ -27,10 +27,11 @@ | |||
27 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 27 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
28 | 28 | ||
29 | struct arch_hw_breakpoint { | 29 | struct arch_hw_breakpoint { |
30 | bool extraneous_interrupt; | ||
31 | u8 len; /* length of the target data symbol */ | ||
32 | int type; | ||
33 | unsigned long address; | 30 | unsigned long address; |
31 | unsigned long dabrx; | ||
32 | int type; | ||
33 | u8 len; /* length of the target data symbol */ | ||
34 | bool extraneous_interrupt; | ||
34 | }; | 35 | }; |
35 | 36 | ||
36 | #include <linux/kdebug.h> | 37 | #include <linux/kdebug.h> |
@@ -61,7 +62,7 @@ extern void ptrace_triggered(struct perf_event *bp, | |||
61 | struct perf_sample_data *data, struct pt_regs *regs); | 62 | struct perf_sample_data *data, struct pt_regs *regs); |
62 | static inline void hw_breakpoint_disable(void) | 63 | static inline void hw_breakpoint_disable(void) |
63 | { | 64 | { |
64 | set_dabr(0); | 65 | set_dabr(0, 0); |
65 | } | 66 | } |
66 | extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); | 67 | extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); |
67 | 68 | ||
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index be0171afdc0f..7b6feab6fd26 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h | |||
@@ -29,21 +29,16 @@ | |||
29 | #include <linux/types.h> | 29 | #include <linux/types.h> |
30 | #include <linux/ptrace.h> | 30 | #include <linux/ptrace.h> |
31 | #include <linux/percpu.h> | 31 | #include <linux/percpu.h> |
32 | #include <asm/probes.h> | ||
32 | 33 | ||
33 | #define __ARCH_WANT_KPROBES_INSN_SLOT | 34 | #define __ARCH_WANT_KPROBES_INSN_SLOT |
34 | 35 | ||
35 | struct pt_regs; | 36 | struct pt_regs; |
36 | struct kprobe; | 37 | struct kprobe; |
37 | 38 | ||
38 | typedef unsigned int kprobe_opcode_t; | 39 | typedef ppc_opcode_t kprobe_opcode_t; |
39 | #define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */ | ||
40 | #define MAX_INSN_SIZE 1 | 40 | #define MAX_INSN_SIZE 1 |
41 | 41 | ||
42 | #define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008) | ||
43 | #define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088) | ||
44 | #define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000) | ||
45 | #define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000) | ||
46 | |||
47 | #ifdef CONFIG_PPC64 | 42 | #ifdef CONFIG_PPC64 |
48 | /* | 43 | /* |
49 | * 64bit powerpc uses function descriptors. | 44 | * 64bit powerpc uses function descriptors. |
@@ -72,12 +67,6 @@ typedef unsigned int kprobe_opcode_t; | |||
72 | addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \ | 67 | addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \ |
73 | } \ | 68 | } \ |
74 | } | 69 | } |
75 | |||
76 | #define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ | ||
77 | IS_TWI(instr) || IS_TDI(instr)) | ||
78 | #else | ||
79 | /* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */ | ||
80 | #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) | ||
81 | #endif | 70 | #endif |
82 | 71 | ||
83 | #define flush_insn_slot(p) do { } while (0) | 72 | #define flush_insn_slot(p) do { } while (0) |
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index bfcd00c1485d..88609b23b775 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h | |||
@@ -74,7 +74,6 @@ struct kvmppc_host_state { | |||
74 | ulong vmhandler; | 74 | ulong vmhandler; |
75 | ulong scratch0; | 75 | ulong scratch0; |
76 | ulong scratch1; | 76 | ulong scratch1; |
77 | ulong sprg3; | ||
78 | u8 in_guest; | 77 | u8 in_guest; |
79 | u8 restore_hid5; | 78 | u8 restore_hid5; |
80 | u8 napping; | 79 | u8 napping; |
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index f7706d722b39..8111e1b78f7f 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h | |||
@@ -180,7 +180,8 @@ struct machdep_calls { | |||
180 | void (*enable_pmcs)(void); | 180 | void (*enable_pmcs)(void); |
181 | 181 | ||
182 | /* Set DABR for this platform, leave empty for default implemenation */ | 182 | /* Set DABR for this platform, leave empty for default implemenation */ |
183 | int (*set_dabr)(unsigned long dabr); | 183 | int (*set_dabr)(unsigned long dabr, |
184 | unsigned long dabrx); | ||
184 | 185 | ||
185 | #ifdef CONFIG_PPC32 /* XXX for now */ | 186 | #ifdef CONFIG_PPC32 /* XXX for now */ |
186 | /* A general init function, called by ppc_init in init/main.c. | 187 | /* A general init function, called by ppc_init in init/main.c. |
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index daf813fea91f..7796519fd238 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h | |||
@@ -136,6 +136,7 @@ struct paca_struct { | |||
136 | u8 io_sync; /* writel() needs spin_unlock sync */ | 136 | u8 io_sync; /* writel() needs spin_unlock sync */ |
137 | u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ | 137 | u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ |
138 | u8 nap_state_lost; /* NV GPR values lost in power7_idle */ | 138 | u8 nap_state_lost; /* NV GPR values lost in power7_idle */ |
139 | u64 sprg3; /* Saved user-visible sprg */ | ||
139 | 140 | ||
140 | #ifdef CONFIG_PPC_POWERNV | 141 | #ifdef CONFIG_PPC_POWERNV |
141 | /* Pointer to OPAL machine check event structure set by the | 142 | /* Pointer to OPAL machine check event structure set by the |
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 8cccbee61519..973df4d9d366 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h | |||
@@ -184,6 +184,8 @@ static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn) | |||
184 | { | 184 | { |
185 | return PCI_DN(dn)->edev; | 185 | return PCI_DN(dn)->edev; |
186 | } | 186 | } |
187 | #else | ||
188 | #define of_node_to_eeh_dev(x) (NULL) | ||
187 | #endif | 189 | #endif |
188 | 190 | ||
189 | /** Find the bus corresponding to the indicated device node */ | 191 | /** Find the bus corresponding to the indicated device node */ |
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index 80fa704d410f..ed57fa7920c8 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h | |||
@@ -47,19 +47,17 @@ extern int rtas_setup_phb(struct pci_controller *phb); | |||
47 | 47 | ||
48 | #ifdef CONFIG_EEH | 48 | #ifdef CONFIG_EEH |
49 | 49 | ||
50 | void pci_addr_cache_build(void); | 50 | void eeh_addr_cache_insert_dev(struct pci_dev *dev); |
51 | void pci_addr_cache_insert_device(struct pci_dev *dev); | 51 | void eeh_addr_cache_rmv_dev(struct pci_dev *dev); |
52 | void pci_addr_cache_remove_device(struct pci_dev *dev); | 52 | struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr); |
53 | struct pci_dev *pci_addr_cache_get_device(unsigned long addr); | 53 | void eeh_slot_error_detail(struct eeh_pe *pe, int severity); |
54 | void eeh_slot_error_detail(struct eeh_dev *edev, int severity); | 54 | int eeh_pci_enable(struct eeh_pe *pe, int function); |
55 | int eeh_pci_enable(struct eeh_dev *edev, int function); | 55 | int eeh_reset_pe(struct eeh_pe *); |
56 | int eeh_reset_pe(struct eeh_dev *); | 56 | void eeh_save_bars(struct eeh_dev *edev); |
57 | void eeh_restore_bars(struct eeh_dev *); | ||
58 | int rtas_write_config(struct pci_dn *, int where, int size, u32 val); | 57 | int rtas_write_config(struct pci_dn *, int where, int size, u32 val); |
59 | int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); | 58 | int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); |
60 | void eeh_mark_slot(struct device_node *dn, int mode_flag); | 59 | void eeh_pe_state_mark(struct eeh_pe *pe, int state); |
61 | void eeh_clear_slot(struct device_node *dn, int mode_flag); | 60 | void eeh_pe_state_clear(struct eeh_pe *pe, int state); |
62 | struct device_node *eeh_find_device_pe(struct device_node *dn); | ||
63 | 61 | ||
64 | void eeh_sysfs_add_device(struct pci_dev *pdev); | 62 | void eeh_sysfs_add_device(struct pci_dev *pdev); |
65 | void eeh_sysfs_remove_device(struct pci_dev *pdev); | 63 | void eeh_sysfs_remove_device(struct pci_dev *pdev); |
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h new file mode 100644 index 000000000000..5f1e15b68704 --- /dev/null +++ b/arch/powerpc/include/asm/probes.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef _ASM_POWERPC_PROBES_H | ||
2 | #define _ASM_POWERPC_PROBES_H | ||
3 | #ifdef __KERNEL__ | ||
4 | /* | ||
5 | * Definitions common to probes files | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | * | ||
21 | * Copyright IBM Corporation, 2012 | ||
22 | */ | ||
23 | #include <linux/types.h> | ||
24 | |||
25 | typedef u32 ppc_opcode_t; | ||
26 | #define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */ | ||
27 | |||
28 | /* Trap definitions per ISA */ | ||
29 | #define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008) | ||
30 | #define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088) | ||
31 | #define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000) | ||
32 | #define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000) | ||
33 | |||
34 | #ifdef CONFIG_PPC64 | ||
35 | #define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ | ||
36 | IS_TWI(instr) || IS_TDI(instr)) | ||
37 | #else | ||
38 | #define is_trap(instr) (IS_TW(instr) || IS_TWI(instr)) | ||
39 | #endif /* CONFIG_PPC64 */ | ||
40 | |||
41 | #endif /* __KERNEL__ */ | ||
42 | #endif /* _ASM_POWERPC_PROBES_H */ | ||
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 54b73a28c205..83efc6e81543 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -219,6 +219,8 @@ struct thread_struct { | |||
219 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 219 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
220 | #endif | 220 | #endif |
221 | unsigned long dabr; /* Data address breakpoint register */ | 221 | unsigned long dabr; /* Data address breakpoint register */ |
222 | unsigned long dabrx; /* ... extension */ | ||
223 | unsigned long trap_nr; /* last trap # on this thread */ | ||
222 | #ifdef CONFIG_ALTIVEC | 224 | #ifdef CONFIG_ALTIVEC |
223 | /* Complete AltiVec register set */ | 225 | /* Complete AltiVec register set */ |
224 | vector128 vr[32] __attribute__((aligned(16))); | 226 | vector128 vr[32] __attribute__((aligned(16))); |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 638608677e2a..121a90bbf778 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -208,6 +208,9 @@ | |||
208 | #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ | 208 | #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ |
209 | #define DABRX_USER (1UL << 0) | 209 | #define DABRX_USER (1UL << 0) |
210 | #define DABRX_KERNEL (1UL << 1) | 210 | #define DABRX_KERNEL (1UL << 1) |
211 | #define DABRX_HYP (1UL << 2) | ||
212 | #define DABRX_BTI (1UL << 3) | ||
213 | #define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER) | ||
211 | #define SPRN_DAR 0x013 /* Data Address Register */ | 214 | #define SPRN_DAR 0x013 /* Data Address Register */ |
212 | #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ | 215 | #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ |
213 | #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ | 216 | #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ |
@@ -761,7 +764,8 @@ | |||
761 | * 64-bit embedded | 764 | * 64-bit embedded |
762 | * - SPRG0 generic exception scratch | 765 | * - SPRG0 generic exception scratch |
763 | * - SPRG2 TLB exception stack | 766 | * - SPRG2 TLB exception stack |
764 | * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible) | 767 | * - SPRG3 critical exception scratch and |
768 | * CPU and NUMA node for VDSO getcpu (user visible) | ||
765 | * - SPRG4 unused (user visible) | 769 | * - SPRG4 unused (user visible) |
766 | * - SPRG6 TLB miss scratch (user visible, sorry !) | 770 | * - SPRG6 TLB miss scratch (user visible, sorry !) |
767 | * - SPRG7 critical exception scratch | 771 | * - SPRG7 critical exception scratch |
@@ -858,11 +862,12 @@ | |||
858 | 862 | ||
859 | #ifdef CONFIG_PPC_BOOK3E_64 | 863 | #ifdef CONFIG_PPC_BOOK3E_64 |
860 | #define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8 | 864 | #define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8 |
861 | #define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7 | 865 | #define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG3 |
862 | #define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9 | 866 | #define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9 |
863 | #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 | 867 | #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 |
864 | #define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 | 868 | #define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 |
865 | #define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 | 869 | #define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 |
870 | #define SPRN_SPRG_GDBELL_SCRATCH SPRN_SPRG_GEN_SCRATCH | ||
866 | 871 | ||
867 | #define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX | 872 | #define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX |
868 | #define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA | 873 | #define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA |
@@ -937,7 +942,7 @@ | |||
937 | #define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ | 942 | #define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ |
938 | #define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */ | 943 | #define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */ |
939 | 944 | ||
940 | #define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv)) | 945 | #define pvr_version_is(pvr) (PVR_VER(mfspr(SPRN_PVR)) == (pvr)) |
941 | 946 | ||
942 | /* | 947 | /* |
943 | * IBM has further subdivided the standard PowerPC 16-bit version and | 948 | * IBM has further subdivided the standard PowerPC 16-bit version and |
@@ -1002,25 +1007,25 @@ | |||
1002 | #define PVR_476_ISS 0x00052000 | 1007 | #define PVR_476_ISS 0x00052000 |
1003 | 1008 | ||
1004 | /* 64-bit processors */ | 1009 | /* 64-bit processors */ |
1005 | /* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */ | 1010 | #define PVR_NORTHSTAR 0x0033 |
1006 | #define PV_NORTHSTAR 0x0033 | 1011 | #define PVR_PULSAR 0x0034 |
1007 | #define PV_PULSAR 0x0034 | 1012 | #define PVR_POWER4 0x0035 |
1008 | #define PV_POWER4 0x0035 | 1013 | #define PVR_ICESTAR 0x0036 |
1009 | #define PV_ICESTAR 0x0036 | 1014 | #define PVR_SSTAR 0x0037 |
1010 | #define PV_SSTAR 0x0037 | 1015 | #define PVR_POWER4p 0x0038 |
1011 | #define PV_POWER4p 0x0038 | 1016 | #define PVR_970 0x0039 |
1012 | #define PV_970 0x0039 | 1017 | #define PVR_POWER5 0x003A |
1013 | #define PV_POWER5 0x003A | 1018 | #define PVR_POWER5p 0x003B |
1014 | #define PV_POWER5p 0x003B | 1019 | #define PVR_970FX 0x003C |
1015 | #define PV_970FX 0x003C | 1020 | #define PVR_POWER6 0x003E |
1016 | #define PV_POWER6 0x003E | 1021 | #define PVR_POWER7 0x003F |
1017 | #define PV_POWER7 0x003F | 1022 | #define PVR_630 0x0040 |
1018 | #define PV_630 0x0040 | 1023 | #define PVR_630p 0x0041 |
1019 | #define PV_630p 0x0041 | 1024 | #define PVR_970MP 0x0044 |
1020 | #define PV_970MP 0x0044 | 1025 | #define PVR_970GX 0x0045 |
1021 | #define PV_970GX 0x0045 | 1026 | #define PVR_POWER7p 0x004A |
1022 | #define PV_BE 0x0070 | 1027 | #define PVR_BE 0x0070 |
1023 | #define PV_PA6T 0x0090 | 1028 | #define PVR_PA6T 0x0090 |
1024 | 1029 | ||
1025 | /* Macros for setting and retrieving special purpose registers */ | 1030 | /* Macros for setting and retrieving special purpose registers */ |
1026 | #ifndef __ASSEMBLY__ | 1031 | #ifndef __ASSEMBLY__ |
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index d084ce195fc3..8b9a306260b2 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h | |||
@@ -9,7 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex); | |||
9 | extern unsigned int rtas_data; | 9 | extern unsigned int rtas_data; |
10 | extern int mem_init_done; /* set on boot once kmalloc can be called */ | 10 | extern int mem_init_done; /* set on boot once kmalloc can be called */ |
11 | extern int init_bootmem_done; /* set once bootmem is available */ | 11 | extern int init_bootmem_done; /* set once bootmem is available */ |
12 | extern phys_addr_t memory_limit; | 12 | extern unsigned long long memory_limit; |
13 | extern unsigned long klimit; | 13 | extern unsigned long klimit; |
14 | extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); | 14 | extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); |
15 | 15 | ||
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index faf93529cbf0..e942203cd4a8 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
@@ -102,6 +102,7 @@ static inline struct thread_info *current_thread_info(void) | |||
102 | #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ | 102 | #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ |
103 | #define TIF_NOERROR 12 /* Force successful syscall return */ | 103 | #define TIF_NOERROR 12 /* Force successful syscall return */ |
104 | #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ | 104 | #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ |
105 | #define TIF_UPROBE 14 /* breakpointed or single-stepping */ | ||
105 | #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ | 106 | #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ |
106 | 107 | ||
107 | /* as above, but as bit values */ | 108 | /* as above, but as bit values */ |
@@ -118,12 +119,13 @@ static inline struct thread_info *current_thread_info(void) | |||
118 | #define _TIF_RESTOREALL (1<<TIF_RESTOREALL) | 119 | #define _TIF_RESTOREALL (1<<TIF_RESTOREALL) |
119 | #define _TIF_NOERROR (1<<TIF_NOERROR) | 120 | #define _TIF_NOERROR (1<<TIF_NOERROR) |
120 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | 121 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) |
122 | #define _TIF_UPROBE (1<<TIF_UPROBE) | ||
121 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) | 123 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
122 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | 124 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
123 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) | 125 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) |
124 | 126 | ||
125 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | 127 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
126 | _TIF_NOTIFY_RESUME) | 128 | _TIF_NOTIFY_RESUME | _TIF_UPROBE) |
127 | #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) | 129 | #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) |
128 | 130 | ||
129 | /* Bits in local_flags */ | 131 | /* Bits in local_flags */ |
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h new file mode 100644 index 000000000000..b532060d0916 --- /dev/null +++ b/arch/powerpc/include/asm/uprobes.h | |||
@@ -0,0 +1,54 @@ | |||
1 | #ifndef _ASM_UPROBES_H | ||
2 | #define _ASM_UPROBES_H | ||
3 | /* | ||
4 | * User-space Probes (UProbes) for powerpc | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | * Copyright IBM Corporation, 2007-2012 | ||
21 | * | ||
22 | * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com> | ||
23 | */ | ||
24 | |||
25 | #include <linux/notifier.h> | ||
26 | #include <asm/probes.h> | ||
27 | |||
28 | typedef ppc_opcode_t uprobe_opcode_t; | ||
29 | |||
30 | #define MAX_UINSN_BYTES 4 | ||
31 | #define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES) | ||
32 | |||
33 | /* The following alias is needed for reference from arch-agnostic code */ | ||
34 | #define UPROBE_SWBP_INSN BREAKPOINT_INSTRUCTION | ||
35 | #define UPROBE_SWBP_INSN_SIZE 4 /* swbp insn size in bytes */ | ||
36 | |||
37 | struct arch_uprobe { | ||
38 | union { | ||
39 | u8 insn[MAX_UINSN_BYTES]; | ||
40 | u32 ainsn; | ||
41 | }; | ||
42 | }; | ||
43 | |||
44 | struct arch_uprobe_task { | ||
45 | unsigned long saved_trap_nr; | ||
46 | }; | ||
47 | |||
48 | extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); | ||
49 | extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); | ||
50 | extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); | ||
51 | extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); | ||
52 | extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); | ||
53 | extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); | ||
54 | #endif /* _ASM_UPROBES_H */ | ||
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index bb282dd81612..cde12f8a4ebc 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -96,6 +96,7 @@ obj-$(CONFIG_MODULES) += ppc_ksyms.o | |||
96 | obj-$(CONFIG_BOOTX_TEXT) += btext.o | 96 | obj-$(CONFIG_BOOTX_TEXT) += btext.o |
97 | obj-$(CONFIG_SMP) += smp.o | 97 | obj-$(CONFIG_SMP) += smp.o |
98 | obj-$(CONFIG_KPROBES) += kprobes.o | 98 | obj-$(CONFIG_KPROBES) += kprobes.o |
99 | obj-$(CONFIG_UPROBES) += uprobes.o | ||
99 | obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o | 100 | obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o |
100 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 101 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
101 | obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o | 102 | obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index e8995727b1c1..7523539cfe9f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -206,6 +206,7 @@ int main(void) | |||
206 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); | 206 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); |
207 | DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); | 207 | DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); |
208 | DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); | 208 | DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); |
209 | DEFINE(PACA_SPRG3, offsetof(struct paca_struct, sprg3)); | ||
209 | #endif /* CONFIG_PPC64 */ | 210 | #endif /* CONFIG_PPC64 */ |
210 | 211 | ||
211 | /* RTAS */ | 212 | /* RTAS */ |
@@ -534,7 +535,6 @@ int main(void) | |||
534 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); | 535 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); |
535 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); | 536 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); |
536 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); | 537 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); |
537 | HSTATE_FIELD(HSTATE_SPRG3, sprg3); | ||
538 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); | 538 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); |
539 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); | 539 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); |
540 | HSTATE_FIELD(HSTATE_NAPPING, napping); | 540 | HSTATE_FIELD(HSTATE_NAPPING, napping); |
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 46943651da23..a720b54b971c 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
15 | #include <linux/memblock.h> | ||
15 | #include <linux/pfn.h> | 16 | #include <linux/pfn.h> |
16 | #include <linux/of_platform.h> | 17 | #include <linux/of_platform.h> |
17 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
@@ -20,7 +21,6 @@ | |||
20 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
21 | #include <asm/swiotlb.h> | 22 | #include <asm/swiotlb.h> |
22 | #include <asm/dma.h> | 23 | #include <asm/dma.h> |
23 | #include <asm/abs_addr.h> | ||
24 | 24 | ||
25 | unsigned int ppc_swiotlb_enable; | 25 | unsigned int ppc_swiotlb_enable; |
26 | 26 | ||
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 355b9d84b0f8..8032b97ccdcb 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | #include <asm/vio.h> | 15 | #include <asm/vio.h> |
16 | #include <asm/bug.h> | 16 | #include <asm/bug.h> |
17 | #include <asm/abs_addr.h> | ||
18 | #include <asm/machdep.h> | 17 | #include <asm/machdep.h> |
19 | 18 | ||
20 | /* | 19 | /* |
@@ -50,7 +49,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, | |||
50 | return NULL; | 49 | return NULL; |
51 | ret = page_address(page); | 50 | ret = page_address(page); |
52 | memset(ret, 0, size); | 51 | memset(ret, 0, size); |
53 | *dma_handle = virt_to_abs(ret) + get_dma_offset(dev); | 52 | *dma_handle = __pa(ret) + get_dma_offset(dev); |
54 | 53 | ||
55 | return ret; | 54 | return ret; |
56 | #endif | 55 | #endif |
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 98be7f0cd227..87a82fbdf05a 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <asm/ppc-opcode.h> | 25 | #include <asm/ppc-opcode.h> |
26 | #include <asm/mmu.h> | 26 | #include <asm/mmu.h> |
27 | #include <asm/hw_irq.h> | 27 | #include <asm/hw_irq.h> |
28 | #include <asm/kvm_asm.h> | ||
29 | #include <asm/kvm_booke_hv_asm.h> | ||
28 | 30 | ||
29 | /* XXX This will ultimately add space for a special exception save | 31 | /* XXX This will ultimately add space for a special exception save |
30 | * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... | 32 | * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... |
@@ -35,16 +37,18 @@ | |||
35 | #define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE | 37 | #define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE |
36 | 38 | ||
37 | /* Exception prolog code for all exceptions */ | 39 | /* Exception prolog code for all exceptions */ |
38 | #define EXCEPTION_PROLOG(n, type, addition) \ | 40 | #define EXCEPTION_PROLOG(n, intnum, type, addition) \ |
39 | mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ | 41 | mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ |
40 | mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ | 42 | mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ |
41 | std r10,PACA_EX##type+EX_R10(r13); \ | 43 | std r10,PACA_EX##type+EX_R10(r13); \ |
42 | std r11,PACA_EX##type+EX_R11(r13); \ | 44 | std r11,PACA_EX##type+EX_R11(r13); \ |
45 | PROLOG_STORE_RESTORE_SCRATCH_##type; \ | ||
43 | mfcr r10; /* save CR */ \ | 46 | mfcr r10; /* save CR */ \ |
47 | mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ | ||
48 | DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \ | ||
49 | stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ | ||
44 | addition; /* additional code for that exc. */ \ | 50 | addition; /* additional code for that exc. */ \ |
45 | std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ | 51 | std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ |
46 | stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \ | ||
47 | mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \ | ||
48 | type##_SET_KSTACK; /* get special stack if necessary */\ | 52 | type##_SET_KSTACK; /* get special stack if necessary */\ |
49 | andi. r10,r11,MSR_PR; /* save stack pointer */ \ | 53 | andi. r10,r11,MSR_PR; /* save stack pointer */ \ |
50 | beq 1f; /* branch around if supervisor */ \ | 54 | beq 1f; /* branch around if supervisor */ \ |
@@ -59,6 +63,10 @@ | |||
59 | #define SPRN_GEN_SRR0 SPRN_SRR0 | 63 | #define SPRN_GEN_SRR0 SPRN_SRR0 |
60 | #define SPRN_GEN_SRR1 SPRN_SRR1 | 64 | #define SPRN_GEN_SRR1 SPRN_SRR1 |
61 | 65 | ||
66 | #define GDBELL_SET_KSTACK GEN_SET_KSTACK | ||
67 | #define SPRN_GDBELL_SRR0 SPRN_GSRR0 | ||
68 | #define SPRN_GDBELL_SRR1 SPRN_GSRR1 | ||
69 | |||
62 | #define CRIT_SET_KSTACK \ | 70 | #define CRIT_SET_KSTACK \ |
63 | ld r1,PACA_CRIT_STACK(r13); \ | 71 | ld r1,PACA_CRIT_STACK(r13); \ |
64 | subi r1,r1,SPECIAL_EXC_FRAME_SIZE; | 72 | subi r1,r1,SPECIAL_EXC_FRAME_SIZE; |
@@ -77,29 +85,46 @@ | |||
77 | #define SPRN_MC_SRR0 SPRN_MCSRR0 | 85 | #define SPRN_MC_SRR0 SPRN_MCSRR0 |
78 | #define SPRN_MC_SRR1 SPRN_MCSRR1 | 86 | #define SPRN_MC_SRR1 SPRN_MCSRR1 |
79 | 87 | ||
80 | #define NORMAL_EXCEPTION_PROLOG(n, addition) \ | 88 | #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ |
81 | EXCEPTION_PROLOG(n, GEN, addition##_GEN(n)) | 89 | EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) |
82 | 90 | ||
83 | #define CRIT_EXCEPTION_PROLOG(n, addition) \ | 91 | #define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \ |
84 | EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n)) | 92 | EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n)) |
85 | 93 | ||
86 | #define DBG_EXCEPTION_PROLOG(n, addition) \ | 94 | #define DBG_EXCEPTION_PROLOG(n, intnum, addition) \ |
87 | EXCEPTION_PROLOG(n, DBG, addition##_DBG(n)) | 95 | EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n)) |
88 | 96 | ||
89 | #define MC_EXCEPTION_PROLOG(n, addition) \ | 97 | #define MC_EXCEPTION_PROLOG(n, intnum, addition) \ |
90 | EXCEPTION_PROLOG(n, MC, addition##_MC(n)) | 98 | EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n)) |
91 | 99 | ||
100 | #define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \ | ||
101 | EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n)) | ||
102 | |||
103 | /* | ||
104 | * Store user-visible scratch in PACA exception slots and restore proper value | ||
105 | */ | ||
106 | #define PROLOG_STORE_RESTORE_SCRATCH_GEN | ||
107 | #define PROLOG_STORE_RESTORE_SCRATCH_GDBELL | ||
108 | #define PROLOG_STORE_RESTORE_SCRATCH_DBG | ||
109 | #define PROLOG_STORE_RESTORE_SCRATCH_MC | ||
110 | |||
111 | #define PROLOG_STORE_RESTORE_SCRATCH_CRIT \ | ||
112 | mfspr r10,SPRN_SPRG_CRIT_SCRATCH; /* get r13 */ \ | ||
113 | std r10,PACA_EXCRIT+EX_R13(r13); \ | ||
114 | ld r11,PACA_SPRG3(r13); \ | ||
115 | mtspr SPRN_SPRG_CRIT_SCRATCH,r11; | ||
92 | 116 | ||
93 | /* Variants of the "addition" argument for the prolog | 117 | /* Variants of the "addition" argument for the prolog |
94 | */ | 118 | */ |
95 | #define PROLOG_ADDITION_NONE_GEN(n) | 119 | #define PROLOG_ADDITION_NONE_GEN(n) |
120 | #define PROLOG_ADDITION_NONE_GDBELL(n) | ||
96 | #define PROLOG_ADDITION_NONE_CRIT(n) | 121 | #define PROLOG_ADDITION_NONE_CRIT(n) |
97 | #define PROLOG_ADDITION_NONE_DBG(n) | 122 | #define PROLOG_ADDITION_NONE_DBG(n) |
98 | #define PROLOG_ADDITION_NONE_MC(n) | 123 | #define PROLOG_ADDITION_NONE_MC(n) |
99 | 124 | ||
100 | #define PROLOG_ADDITION_MASKABLE_GEN(n) \ | 125 | #define PROLOG_ADDITION_MASKABLE_GEN(n) \ |
101 | lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ | 126 | lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ |
102 | cmpwi cr0,r11,0; /* yes -> go out of line */ \ | 127 | cmpwi cr0,r10,0; /* yes -> go out of line */ \ |
103 | beq masked_interrupt_book3e_##n | 128 | beq masked_interrupt_book3e_##n |
104 | 129 | ||
105 | #define PROLOG_ADDITION_2REGS_GEN(n) \ | 130 | #define PROLOG_ADDITION_2REGS_GEN(n) \ |
@@ -233,9 +258,9 @@ exc_##n##_bad_stack: \ | |||
233 | 1: | 258 | 1: |
234 | 259 | ||
235 | 260 | ||
236 | #define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ | 261 | #define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \ |
237 | START_EXCEPTION(label); \ | 262 | START_EXCEPTION(label); \ |
238 | NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ | 263 | NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\ |
239 | EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \ | 264 | EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \ |
240 | ack(r8); \ | 265 | ack(r8); \ |
241 | CHECK_NAPPING(); \ | 266 | CHECK_NAPPING(); \ |
@@ -286,7 +311,8 @@ interrupt_end_book3e: | |||
286 | 311 | ||
287 | /* Critical Input Interrupt */ | 312 | /* Critical Input Interrupt */ |
288 | START_EXCEPTION(critical_input); | 313 | START_EXCEPTION(critical_input); |
289 | CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) | 314 | CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, |
315 | PROLOG_ADDITION_NONE) | ||
290 | // EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE) | 316 | // EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE) |
291 | // bl special_reg_save_crit | 317 | // bl special_reg_save_crit |
292 | // CHECK_NAPPING(); | 318 | // CHECK_NAPPING(); |
@@ -297,7 +323,8 @@ interrupt_end_book3e: | |||
297 | 323 | ||
298 | /* Machine Check Interrupt */ | 324 | /* Machine Check Interrupt */ |
299 | START_EXCEPTION(machine_check); | 325 | START_EXCEPTION(machine_check); |
300 | CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) | 326 | MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK, |
327 | PROLOG_ADDITION_NONE) | ||
301 | // EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE) | 328 | // EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE) |
302 | // bl special_reg_save_mc | 329 | // bl special_reg_save_mc |
303 | // addi r3,r1,STACK_FRAME_OVERHEAD | 330 | // addi r3,r1,STACK_FRAME_OVERHEAD |
@@ -308,7 +335,8 @@ interrupt_end_book3e: | |||
308 | 335 | ||
309 | /* Data Storage Interrupt */ | 336 | /* Data Storage Interrupt */ |
310 | START_EXCEPTION(data_storage) | 337 | START_EXCEPTION(data_storage) |
311 | NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) | 338 | NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE, |
339 | PROLOG_ADDITION_2REGS) | ||
312 | mfspr r14,SPRN_DEAR | 340 | mfspr r14,SPRN_DEAR |
313 | mfspr r15,SPRN_ESR | 341 | mfspr r15,SPRN_ESR |
314 | EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE) | 342 | EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE) |
@@ -316,18 +344,21 @@ interrupt_end_book3e: | |||
316 | 344 | ||
317 | /* Instruction Storage Interrupt */ | 345 | /* Instruction Storage Interrupt */ |
318 | START_EXCEPTION(instruction_storage); | 346 | START_EXCEPTION(instruction_storage); |
319 | NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) | 347 | NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE, |
348 | PROLOG_ADDITION_2REGS) | ||
320 | li r15,0 | 349 | li r15,0 |
321 | mr r14,r10 | 350 | mr r14,r10 |
322 | EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE) | 351 | EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE) |
323 | b storage_fault_common | 352 | b storage_fault_common |
324 | 353 | ||
325 | /* External Input Interrupt */ | 354 | /* External Input Interrupt */ |
326 | MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE) | 355 | MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, |
356 | external_input, .do_IRQ, ACK_NONE) | ||
327 | 357 | ||
328 | /* Alignment */ | 358 | /* Alignment */ |
329 | START_EXCEPTION(alignment); | 359 | START_EXCEPTION(alignment); |
330 | NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS) | 360 | NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT, |
361 | PROLOG_ADDITION_2REGS) | ||
331 | mfspr r14,SPRN_DEAR | 362 | mfspr r14,SPRN_DEAR |
332 | mfspr r15,SPRN_ESR | 363 | mfspr r15,SPRN_ESR |
333 | EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP) | 364 | EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP) |
@@ -335,7 +366,8 @@ interrupt_end_book3e: | |||
335 | 366 | ||
336 | /* Program Interrupt */ | 367 | /* Program Interrupt */ |
337 | START_EXCEPTION(program); | 368 | START_EXCEPTION(program); |
338 | NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) | 369 | NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM, |
370 | PROLOG_ADDITION_1REG) | ||
339 | mfspr r14,SPRN_ESR | 371 | mfspr r14,SPRN_ESR |
340 | EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE) | 372 | EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE) |
341 | std r14,_DSISR(r1) | 373 | std r14,_DSISR(r1) |
@@ -347,7 +379,8 @@ interrupt_end_book3e: | |||
347 | 379 | ||
348 | /* Floating Point Unavailable Interrupt */ | 380 | /* Floating Point Unavailable Interrupt */ |
349 | START_EXCEPTION(fp_unavailable); | 381 | START_EXCEPTION(fp_unavailable); |
350 | NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE) | 382 | NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL, |
383 | PROLOG_ADDITION_NONE) | ||
351 | /* we can probably do a shorter exception entry for that one... */ | 384 | /* we can probably do a shorter exception entry for that one... */ |
352 | EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) | 385 | EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) |
353 | ld r12,_MSR(r1) | 386 | ld r12,_MSR(r1) |
@@ -362,14 +395,17 @@ interrupt_end_book3e: | |||
362 | b .ret_from_except | 395 | b .ret_from_except |
363 | 396 | ||
364 | /* Decrementer Interrupt */ | 397 | /* Decrementer Interrupt */ |
365 | MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC) | 398 | MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, |
399 | decrementer, .timer_interrupt, ACK_DEC) | ||
366 | 400 | ||
367 | /* Fixed Interval Timer Interrupt */ | 401 | /* Fixed Interval Timer Interrupt */ |
368 | MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT) | 402 | MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, |
403 | fixed_interval, .unknown_exception, ACK_FIT) | ||
369 | 404 | ||
370 | /* Watchdog Timer Interrupt */ | 405 | /* Watchdog Timer Interrupt */ |
371 | START_EXCEPTION(watchdog); | 406 | START_EXCEPTION(watchdog); |
372 | CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) | 407 | CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, |
408 | PROLOG_ADDITION_NONE) | ||
373 | // EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE) | 409 | // EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE) |
374 | // bl special_reg_save_crit | 410 | // bl special_reg_save_crit |
375 | // CHECK_NAPPING(); | 411 | // CHECK_NAPPING(); |
@@ -388,7 +424,8 @@ interrupt_end_book3e: | |||
388 | 424 | ||
389 | /* Auxiliary Processor Unavailable Interrupt */ | 425 | /* Auxiliary Processor Unavailable Interrupt */ |
390 | START_EXCEPTION(ap_unavailable); | 426 | START_EXCEPTION(ap_unavailable); |
391 | NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) | 427 | NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL, |
428 | PROLOG_ADDITION_NONE) | ||
392 | EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE) | 429 | EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE) |
393 | bl .save_nvgprs | 430 | bl .save_nvgprs |
394 | addi r3,r1,STACK_FRAME_OVERHEAD | 431 | addi r3,r1,STACK_FRAME_OVERHEAD |
@@ -397,7 +434,8 @@ interrupt_end_book3e: | |||
397 | 434 | ||
398 | /* Debug exception as a critical interrupt*/ | 435 | /* Debug exception as a critical interrupt*/ |
399 | START_EXCEPTION(debug_crit); | 436 | START_EXCEPTION(debug_crit); |
400 | CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) | 437 | CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, |
438 | PROLOG_ADDITION_2REGS) | ||
401 | 439 | ||
402 | /* | 440 | /* |
403 | * If there is a single step or branch-taken exception in an | 441 | * If there is a single step or branch-taken exception in an |
@@ -431,7 +469,7 @@ interrupt_end_book3e: | |||
431 | mtcr r10 | 469 | mtcr r10 |
432 | ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ | 470 | ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ |
433 | ld r11,PACA_EXCRIT+EX_R11(r13) | 471 | ld r11,PACA_EXCRIT+EX_R11(r13) |
434 | mfspr r13,SPRN_SPRG_CRIT_SCRATCH | 472 | ld r13,PACA_EXCRIT+EX_R13(r13) |
435 | rfci | 473 | rfci |
436 | 474 | ||
437 | /* Normal debug exception */ | 475 | /* Normal debug exception */ |
@@ -444,7 +482,7 @@ interrupt_end_book3e: | |||
444 | /* Now we mash up things to make it look like we are coming on a | 482 | /* Now we mash up things to make it look like we are coming on a |
445 | * normal exception | 483 | * normal exception |
446 | */ | 484 | */ |
447 | mfspr r15,SPRN_SPRG_CRIT_SCRATCH | 485 | ld r15,PACA_EXCRIT+EX_R13(r13) |
448 | mtspr SPRN_SPRG_GEN_SCRATCH,r15 | 486 | mtspr SPRN_SPRG_GEN_SCRATCH,r15 |
449 | mfspr r14,SPRN_DBSR | 487 | mfspr r14,SPRN_DBSR |
450 | EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE) | 488 | EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE) |
@@ -462,7 +500,8 @@ kernel_dbg_exc: | |||
462 | 500 | ||
463 | /* Debug exception as a debug interrupt*/ | 501 | /* Debug exception as a debug interrupt*/ |
464 | START_EXCEPTION(debug_debug); | 502 | START_EXCEPTION(debug_debug); |
465 | DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS) | 503 | DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG, |
504 | PROLOG_ADDITION_2REGS) | ||
466 | 505 | ||
467 | /* | 506 | /* |
468 | * If there is a single step or branch-taken exception in an | 507 | * If there is a single step or branch-taken exception in an |
@@ -523,18 +562,21 @@ kernel_dbg_exc: | |||
523 | b .ret_from_except | 562 | b .ret_from_except |
524 | 563 | ||
525 | START_EXCEPTION(perfmon); | 564 | START_EXCEPTION(perfmon); |
526 | NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE) | 565 | NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, |
566 | PROLOG_ADDITION_NONE) | ||
527 | EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) | 567 | EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) |
528 | addi r3,r1,STACK_FRAME_OVERHEAD | 568 | addi r3,r1,STACK_FRAME_OVERHEAD |
529 | bl .performance_monitor_exception | 569 | bl .performance_monitor_exception |
530 | b .ret_from_except_lite | 570 | b .ret_from_except_lite |
531 | 571 | ||
532 | /* Doorbell interrupt */ | 572 | /* Doorbell interrupt */ |
533 | MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE) | 573 | MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, |
574 | doorbell, .doorbell_exception, ACK_NONE) | ||
534 | 575 | ||
535 | /* Doorbell critical Interrupt */ | 576 | /* Doorbell critical Interrupt */ |
536 | START_EXCEPTION(doorbell_crit); | 577 | START_EXCEPTION(doorbell_crit); |
537 | CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE) | 578 | CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, |
579 | PROLOG_ADDITION_NONE) | ||
538 | // EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE) | 580 | // EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE) |
539 | // bl special_reg_save_crit | 581 | // bl special_reg_save_crit |
540 | // CHECK_NAPPING(); | 582 | // CHECK_NAPPING(); |
@@ -543,12 +585,24 @@ kernel_dbg_exc: | |||
543 | // b ret_from_crit_except | 585 | // b ret_from_crit_except |
544 | b . | 586 | b . |
545 | 587 | ||
546 | /* Guest Doorbell */ | 588 | /* |
547 | MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) | 589 | * Guest doorbell interrupt |
590 | * This general exception use GSRRx save/restore registers | ||
591 | */ | ||
592 | START_EXCEPTION(guest_doorbell); | ||
593 | GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL, | ||
594 | PROLOG_ADDITION_NONE) | ||
595 | EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP) | ||
596 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
597 | bl .save_nvgprs | ||
598 | INTS_RESTORE_HARD | ||
599 | bl .unknown_exception | ||
600 | b .ret_from_except | ||
548 | 601 | ||
549 | /* Guest Doorbell critical Interrupt */ | 602 | /* Guest Doorbell critical Interrupt */ |
550 | START_EXCEPTION(guest_doorbell_crit); | 603 | START_EXCEPTION(guest_doorbell_crit); |
551 | CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE) | 604 | CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, |
605 | PROLOG_ADDITION_NONE) | ||
552 | // EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE) | 606 | // EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE) |
553 | // bl special_reg_save_crit | 607 | // bl special_reg_save_crit |
554 | // CHECK_NAPPING(); | 608 | // CHECK_NAPPING(); |
@@ -559,7 +613,8 @@ kernel_dbg_exc: | |||
559 | 613 | ||
560 | /* Hypervisor call */ | 614 | /* Hypervisor call */ |
561 | START_EXCEPTION(hypercall); | 615 | START_EXCEPTION(hypercall); |
562 | NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE) | 616 | NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL, |
617 | PROLOG_ADDITION_NONE) | ||
563 | EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP) | 618 | EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP) |
564 | addi r3,r1,STACK_FRAME_OVERHEAD | 619 | addi r3,r1,STACK_FRAME_OVERHEAD |
565 | bl .save_nvgprs | 620 | bl .save_nvgprs |
@@ -569,7 +624,8 @@ kernel_dbg_exc: | |||
569 | 624 | ||
570 | /* Embedded Hypervisor priviledged */ | 625 | /* Embedded Hypervisor priviledged */ |
571 | START_EXCEPTION(ehpriv); | 626 | START_EXCEPTION(ehpriv); |
572 | NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE) | 627 | NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV, |
628 | PROLOG_ADDITION_NONE) | ||
573 | EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP) | 629 | EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP) |
574 | addi r3,r1,STACK_FRAME_OVERHEAD | 630 | addi r3,r1,STACK_FRAME_OVERHEAD |
575 | bl .save_nvgprs | 631 | bl .save_nvgprs |
@@ -582,44 +638,42 @@ kernel_dbg_exc: | |||
582 | * accordingly and if the interrupt is level sensitive, we hard disable | 638 | * accordingly and if the interrupt is level sensitive, we hard disable |
583 | */ | 639 | */ |
584 | 640 | ||
641 | .macro masked_interrupt_book3e paca_irq full_mask | ||
642 | lbz r10,PACAIRQHAPPENED(r13) | ||
643 | ori r10,r10,\paca_irq | ||
644 | stb r10,PACAIRQHAPPENED(r13) | ||
645 | |||
646 | .if \full_mask == 1 | ||
647 | rldicl r10,r11,48,1 /* clear MSR_EE */ | ||
648 | rotldi r11,r10,16 | ||
649 | mtspr SPRN_SRR1,r11 | ||
650 | .endif | ||
651 | |||
652 | lwz r11,PACA_EXGEN+EX_CR(r13) | ||
653 | mtcr r11 | ||
654 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
655 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
656 | mfspr r13,SPRN_SPRG_GEN_SCRATCH | ||
657 | rfi | ||
658 | b . | ||
659 | .endm | ||
660 | |||
585 | masked_interrupt_book3e_0x500: | 661 | masked_interrupt_book3e_0x500: |
586 | /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */ | 662 | // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE |
587 | li r11,PACA_IRQ_EE | 663 | masked_interrupt_book3e PACA_IRQ_EE 1 |
588 | b masked_interrupt_book3e_full_mask | ||
589 | 664 | ||
590 | masked_interrupt_book3e_0x900: | 665 | masked_interrupt_book3e_0x900: |
591 | ACK_DEC(r11); | 666 | ACK_DEC(r10); |
592 | li r11,PACA_IRQ_DEC | 667 | masked_interrupt_book3e PACA_IRQ_DEC 0 |
593 | b masked_interrupt_book3e_no_mask | 668 | |
594 | masked_interrupt_book3e_0x980: | 669 | masked_interrupt_book3e_0x980: |
595 | ACK_FIT(r11); | 670 | ACK_FIT(r10); |
596 | li r11,PACA_IRQ_DEC | 671 | masked_interrupt_book3e PACA_IRQ_DEC 0 |
597 | b masked_interrupt_book3e_no_mask | 672 | |
598 | masked_interrupt_book3e_0x280: | 673 | masked_interrupt_book3e_0x280: |
599 | masked_interrupt_book3e_0x2c0: | 674 | masked_interrupt_book3e_0x2c0: |
600 | li r11,PACA_IRQ_DBELL | 675 | masked_interrupt_book3e PACA_IRQ_DBELL 0 |
601 | b masked_interrupt_book3e_no_mask | ||
602 | 676 | ||
603 | masked_interrupt_book3e_no_mask: | ||
604 | mtcr r10 | ||
605 | lbz r10,PACAIRQHAPPENED(r13) | ||
606 | or r10,r10,r11 | ||
607 | stb r10,PACAIRQHAPPENED(r13) | ||
608 | b 1f | ||
609 | masked_interrupt_book3e_full_mask: | ||
610 | mtcr r10 | ||
611 | lbz r10,PACAIRQHAPPENED(r13) | ||
612 | or r10,r10,r11 | ||
613 | stb r10,PACAIRQHAPPENED(r13) | ||
614 | mfspr r10,SPRN_SRR1 | ||
615 | rldicl r11,r10,48,1 /* clear MSR_EE */ | ||
616 | rotldi r10,r11,16 | ||
617 | mtspr SPRN_SRR1,r10 | ||
618 | 1: ld r10,PACA_EXGEN+EX_R10(r13); | ||
619 | ld r11,PACA_EXGEN+EX_R11(r13); | ||
620 | mfspr r13,SPRN_SPRG_GEN_SCRATCH; | ||
621 | rfi | ||
622 | b . | ||
623 | /* | 677 | /* |
624 | * Called from arch_local_irq_enable when an interrupt needs | 678 | * Called from arch_local_irq_enable when an interrupt needs |
625 | * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280 | 679 | * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280 |
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 18bdf74fa164..06c8202a69cf 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c | |||
@@ -289,8 +289,7 @@ int __init fadump_reserve_mem(void) | |||
289 | else | 289 | else |
290 | memory_limit = memblock_end_of_DRAM(); | 290 | memory_limit = memblock_end_of_DRAM(); |
291 | printk(KERN_INFO "Adjusted memory_limit for firmware-assisted" | 291 | printk(KERN_INFO "Adjusted memory_limit for firmware-assisted" |
292 | " dump, now %#016llx\n", | 292 | " dump, now %#016llx\n", memory_limit); |
293 | (unsigned long long)memory_limit); | ||
294 | } | 293 | } |
295 | if (memory_limit) | 294 | if (memory_limit) |
296 | memory_boundary = memory_limit; | 295 | memory_boundary = memory_limit; |
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 956a4c496de9..a89cae481b04 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c | |||
@@ -73,7 +73,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
73 | * If so, DABR will be populated in single_step_dabr_instruction(). | 73 | * If so, DABR will be populated in single_step_dabr_instruction(). |
74 | */ | 74 | */ |
75 | if (current->thread.last_hit_ubp != bp) | 75 | if (current->thread.last_hit_ubp != bp) |
76 | set_dabr(info->address | info->type | DABR_TRANSLATION); | 76 | set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); |
77 | 77 | ||
78 | return 0; | 78 | return 0; |
79 | } | 79 | } |
@@ -97,7 +97,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
97 | } | 97 | } |
98 | 98 | ||
99 | *slot = NULL; | 99 | *slot = NULL; |
100 | set_dabr(0); | 100 | set_dabr(0, 0); |
101 | } | 101 | } |
102 | 102 | ||
103 | /* | 103 | /* |
@@ -170,6 +170,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) | |||
170 | 170 | ||
171 | info->address = bp->attr.bp_addr; | 171 | info->address = bp->attr.bp_addr; |
172 | info->len = bp->attr.bp_len; | 172 | info->len = bp->attr.bp_len; |
173 | info->dabrx = DABRX_ALL; | ||
174 | if (bp->attr.exclude_user) | ||
175 | info->dabrx &= ~DABRX_USER; | ||
176 | if (bp->attr.exclude_kernel) | ||
177 | info->dabrx &= ~DABRX_KERNEL; | ||
178 | if (bp->attr.exclude_hv) | ||
179 | info->dabrx &= ~DABRX_HYP; | ||
173 | 180 | ||
174 | /* | 181 | /* |
175 | * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) | 182 | * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) |
@@ -197,7 +204,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) | |||
197 | 204 | ||
198 | info = counter_arch_bp(tsk->thread.last_hit_ubp); | 205 | info = counter_arch_bp(tsk->thread.last_hit_ubp); |
199 | regs->msr &= ~MSR_SE; | 206 | regs->msr &= ~MSR_SE; |
200 | set_dabr(info->address | info->type | DABR_TRANSLATION); | 207 | set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); |
201 | tsk->thread.last_hit_ubp = NULL; | 208 | tsk->thread.last_hit_ubp = NULL; |
202 | } | 209 | } |
203 | 210 | ||
@@ -215,7 +222,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
215 | unsigned long dar = regs->dar; | 222 | unsigned long dar = regs->dar; |
216 | 223 | ||
217 | /* Disable breakpoints during exception handling */ | 224 | /* Disable breakpoints during exception handling */ |
218 | set_dabr(0); | 225 | set_dabr(0, 0); |
219 | 226 | ||
220 | /* | 227 | /* |
221 | * The counter may be concurrently released but that can only | 228 | * The counter may be concurrently released but that can only |
@@ -281,7 +288,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
281 | if (!info->extraneous_interrupt) | 288 | if (!info->extraneous_interrupt) |
282 | perf_bp_event(bp, regs); | 289 | perf_bp_event(bp, regs); |
283 | 290 | ||
284 | set_dabr(info->address | info->type | DABR_TRANSLATION); | 291 | set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); |
285 | out: | 292 | out: |
286 | rcu_read_unlock(); | 293 | rcu_read_unlock(); |
287 | return rc; | 294 | return rc; |
@@ -294,7 +301,7 @@ int __kprobes single_step_dabr_instruction(struct die_args *args) | |||
294 | { | 301 | { |
295 | struct pt_regs *regs = args->regs; | 302 | struct pt_regs *regs = args->regs; |
296 | struct perf_event *bp = NULL; | 303 | struct perf_event *bp = NULL; |
297 | struct arch_hw_breakpoint *bp_info; | 304 | struct arch_hw_breakpoint *info; |
298 | 305 | ||
299 | bp = current->thread.last_hit_ubp; | 306 | bp = current->thread.last_hit_ubp; |
300 | /* | 307 | /* |
@@ -304,16 +311,16 @@ int __kprobes single_step_dabr_instruction(struct die_args *args) | |||
304 | if (!bp) | 311 | if (!bp) |
305 | return NOTIFY_DONE; | 312 | return NOTIFY_DONE; |
306 | 313 | ||
307 | bp_info = counter_arch_bp(bp); | 314 | info = counter_arch_bp(bp); |
308 | 315 | ||
309 | /* | 316 | /* |
310 | * We shall invoke the user-defined callback function in the single | 317 | * We shall invoke the user-defined callback function in the single |
311 | * stepping handler to confirm to 'trigger-after-execute' semantics | 318 | * stepping handler to confirm to 'trigger-after-execute' semantics |
312 | */ | 319 | */ |
313 | if (!bp_info->extraneous_interrupt) | 320 | if (!info->extraneous_interrupt) |
314 | perf_bp_event(bp, regs); | 321 | perf_bp_event(bp, regs); |
315 | 322 | ||
316 | set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION); | 323 | set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx); |
317 | current->thread.last_hit_ubp = NULL; | 324 | current->thread.last_hit_ubp = NULL; |
318 | 325 | ||
319 | /* | 326 | /* |
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index b01d14eeca8d..8220baa46faf 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -47,7 +47,6 @@ | |||
47 | #include <linux/stat.h> | 47 | #include <linux/stat.h> |
48 | #include <linux/of_platform.h> | 48 | #include <linux/of_platform.h> |
49 | #include <asm/ibmebus.h> | 49 | #include <asm/ibmebus.h> |
50 | #include <asm/abs_addr.h> | ||
51 | 50 | ||
52 | static struct device ibmebus_bus_device = { /* fake "parent" device */ | 51 | static struct device ibmebus_bus_device = { /* fake "parent" device */ |
53 | .init_name = "ibmebus", | 52 | .init_name = "ibmebus", |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 5df777794403..fa9f6c72f557 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -165,7 +165,7 @@ void __init reserve_crashkernel(void) | |||
165 | if (memory_limit && memory_limit <= crashk_res.end) { | 165 | if (memory_limit && memory_limit <= crashk_res.end) { |
166 | memory_limit = crashk_res.end + 1; | 166 | memory_limit = crashk_res.end + 1; |
167 | printk("Adjusted memory limit for crashkernel, now 0x%llx\n", | 167 | printk("Adjusted memory limit for crashkernel, now 0x%llx\n", |
168 | (unsigned long long)memory_limit); | 168 | memory_limit); |
169 | } | 169 | } |
170 | 170 | ||
171 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " | 171 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " |
@@ -204,6 +204,12 @@ static struct property crashk_size_prop = { | |||
204 | .value = &crashk_size, | 204 | .value = &crashk_size, |
205 | }; | 205 | }; |
206 | 206 | ||
207 | static struct property memory_limit_prop = { | ||
208 | .name = "linux,memory-limit", | ||
209 | .length = sizeof(unsigned long long), | ||
210 | .value = &memory_limit, | ||
211 | }; | ||
212 | |||
207 | static void __init export_crashk_values(struct device_node *node) | 213 | static void __init export_crashk_values(struct device_node *node) |
208 | { | 214 | { |
209 | struct property *prop; | 215 | struct property *prop; |
@@ -223,6 +229,12 @@ static void __init export_crashk_values(struct device_node *node) | |||
223 | crashk_size = resource_size(&crashk_res); | 229 | crashk_size = resource_size(&crashk_res); |
224 | prom_add_property(node, &crashk_size_prop); | 230 | prom_add_property(node, &crashk_size_prop); |
225 | } | 231 | } |
232 | |||
233 | /* | ||
234 | * memory_limit is required by the kexec-tools to limit the | ||
235 | * crash regions to the actual memory used. | ||
236 | */ | ||
237 | prom_update_property(node, &memory_limit_prop); | ||
226 | } | 238 | } |
227 | 239 | ||
228 | static int __init kexec_setup(void) | 240 | static int __init kexec_setup(void) |
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index fbe1a12dc7f1..cd6da855090c 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -142,6 +142,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) | |||
142 | new_paca->hw_cpu_id = 0xffff; | 142 | new_paca->hw_cpu_id = 0xffff; |
143 | new_paca->kexec_state = KEXEC_STATE_NONE; | 143 | new_paca->kexec_state = KEXEC_STATE_NONE; |
144 | new_paca->__current = &init_task; | 144 | new_paca->__current = &init_task; |
145 | new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL; | ||
145 | #ifdef CONFIG_PPC_STD_MMU_64 | 146 | #ifdef CONFIG_PPC_STD_MMU_64 |
146 | new_paca->slb_shadow_ptr = &slb_shadow[cpu]; | 147 | new_paca->slb_shadow_ptr = &slb_shadow[cpu]; |
147 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 148 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 43fea543d686..7f94f760dd0c 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -980,13 +980,14 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) | |||
980 | if (i >= 3 && bus->self->transparent) | 980 | if (i >= 3 && bus->self->transparent) |
981 | continue; | 981 | continue; |
982 | 982 | ||
983 | /* If we are going to re-assign everything, mark the resource | 983 | /* If we're going to reassign everything, we can |
984 | * as unset and move it down to 0 | 984 | * shrink the P2P resource to have size as being |
985 | * of 0 in order to save space. | ||
985 | */ | 986 | */ |
986 | if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { | 987 | if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { |
987 | res->flags |= IORESOURCE_UNSET; | 988 | res->flags |= IORESOURCE_UNSET; |
988 | res->end -= res->start; | ||
989 | res->start = 0; | 989 | res->start = 0; |
990 | res->end = -1; | ||
990 | continue; | 991 | continue; |
991 | } | 992 | } |
992 | 993 | ||
@@ -1248,7 +1249,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) | |||
1248 | pr_warning("PCI: Cannot allocate resource region " | 1249 | pr_warning("PCI: Cannot allocate resource region " |
1249 | "%d of PCI bridge %d, will remap\n", i, bus->number); | 1250 | "%d of PCI bridge %d, will remap\n", i, bus->number); |
1250 | clear_resource: | 1251 | clear_resource: |
1251 | res->start = res->end = 0; | 1252 | /* The resource might be figured out when doing |
1253 | * reassignment based on the resources required | ||
1254 | * by the downstream PCI devices. Here we set | ||
1255 | * the size of the resource to be 0 in order to | ||
1256 | * save more space. | ||
1257 | */ | ||
1258 | res->start = 0; | ||
1259 | res->end = -1; | ||
1252 | res->flags = 0; | 1260 | res->flags = 0; |
1253 | } | 1261 | } |
1254 | 1262 | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 1a1f2ddfb581..50e504c29bb9 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -258,6 +258,7 @@ void do_send_trap(struct pt_regs *regs, unsigned long address, | |||
258 | { | 258 | { |
259 | siginfo_t info; | 259 | siginfo_t info; |
260 | 260 | ||
261 | current->thread.trap_nr = signal_code; | ||
261 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | 262 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, |
262 | 11, SIGSEGV) == NOTIFY_STOP) | 263 | 11, SIGSEGV) == NOTIFY_STOP) |
263 | return; | 264 | return; |
@@ -275,6 +276,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address, | |||
275 | { | 276 | { |
276 | siginfo_t info; | 277 | siginfo_t info; |
277 | 278 | ||
279 | current->thread.trap_nr = TRAP_HWBKPT; | ||
278 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | 280 | if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, |
279 | 11, SIGSEGV) == NOTIFY_STOP) | 281 | 11, SIGSEGV) == NOTIFY_STOP) |
280 | return; | 282 | return; |
@@ -283,7 +285,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address, | |||
283 | return; | 285 | return; |
284 | 286 | ||
285 | /* Clear the DABR */ | 287 | /* Clear the DABR */ |
286 | set_dabr(0); | 288 | set_dabr(0, 0); |
287 | 289 | ||
288 | /* Deliver the signal to userspace */ | 290 | /* Deliver the signal to userspace */ |
289 | info.si_signo = SIGTRAP; | 291 | info.si_signo = SIGTRAP; |
@@ -364,18 +366,19 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
364 | { | 366 | { |
365 | if (thread->dabr) { | 367 | if (thread->dabr) { |
366 | thread->dabr = 0; | 368 | thread->dabr = 0; |
367 | set_dabr(0); | 369 | thread->dabrx = 0; |
370 | set_dabr(0, 0); | ||
368 | } | 371 | } |
369 | } | 372 | } |
370 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ | 373 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ |
371 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 374 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
372 | 375 | ||
373 | int set_dabr(unsigned long dabr) | 376 | int set_dabr(unsigned long dabr, unsigned long dabrx) |
374 | { | 377 | { |
375 | __get_cpu_var(current_dabr) = dabr; | 378 | __get_cpu_var(current_dabr) = dabr; |
376 | 379 | ||
377 | if (ppc_md.set_dabr) | 380 | if (ppc_md.set_dabr) |
378 | return ppc_md.set_dabr(dabr); | 381 | return ppc_md.set_dabr(dabr, dabrx); |
379 | 382 | ||
380 | /* XXX should we have a CPU_FTR_HAS_DABR ? */ | 383 | /* XXX should we have a CPU_FTR_HAS_DABR ? */ |
381 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 384 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
@@ -385,9 +388,8 @@ int set_dabr(unsigned long dabr) | |||
385 | #endif | 388 | #endif |
386 | #elif defined(CONFIG_PPC_BOOK3S) | 389 | #elif defined(CONFIG_PPC_BOOK3S) |
387 | mtspr(SPRN_DABR, dabr); | 390 | mtspr(SPRN_DABR, dabr); |
391 | mtspr(SPRN_DABRX, dabrx); | ||
388 | #endif | 392 | #endif |
389 | |||
390 | |||
391 | return 0; | 393 | return 0; |
392 | } | 394 | } |
393 | 395 | ||
@@ -480,7 +482,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
480 | */ | 482 | */ |
481 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 483 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
482 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) | 484 | if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) |
483 | set_dabr(new->thread.dabr); | 485 | set_dabr(new->thread.dabr, new->thread.dabrx); |
484 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 486 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
485 | #endif | 487 | #endif |
486 | 488 | ||
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index f191bf02943a..37725e86651e 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -78,7 +78,7 @@ static int __init early_parse_mem(char *p) | |||
78 | return 1; | 78 | return 1; |
79 | 79 | ||
80 | memory_limit = PAGE_ALIGN(memparse(p, &p)); | 80 | memory_limit = PAGE_ALIGN(memparse(p, &p)); |
81 | DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit); | 81 | DBG("memory limit = 0x%llx\n", memory_limit); |
82 | 82 | ||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
@@ -661,7 +661,7 @@ void __init early_init_devtree(void *params) | |||
661 | 661 | ||
662 | /* make sure we've parsed cmdline for mem= before this */ | 662 | /* make sure we've parsed cmdline for mem= before this */ |
663 | if (memory_limit) | 663 | if (memory_limit) |
664 | first_memblock_size = min(first_memblock_size, memory_limit); | 664 | first_memblock_size = min_t(u64, first_memblock_size, memory_limit); |
665 | setup_initial_memory_limit(memstart_addr, first_memblock_size); | 665 | setup_initial_memory_limit(memstart_addr, first_memblock_size); |
666 | /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ | 666 | /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ |
667 | memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); | 667 | memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 0794a3017b1b..ce68278a5d73 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -1691,7 +1691,7 @@ static void __init prom_initialize_tce_table(void) | |||
1691 | * else will impact performance, so we always allocate 8MB. | 1691 | * else will impact performance, so we always allocate 8MB. |
1692 | * Anton | 1692 | * Anton |
1693 | */ | 1693 | */ |
1694 | if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p)) | 1694 | if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p)) |
1695 | minsize = 8UL << 20; | 1695 | minsize = 8UL << 20; |
1696 | else | 1696 | else |
1697 | minsize = 4UL << 20; | 1697 | minsize = 4UL << 20; |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index c10fc28b9092..79d8e56470df 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -960,6 +960,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
960 | thread->ptrace_bps[0] = bp; | 960 | thread->ptrace_bps[0] = bp; |
961 | ptrace_put_breakpoints(task); | 961 | ptrace_put_breakpoints(task); |
962 | thread->dabr = data; | 962 | thread->dabr = data; |
963 | thread->dabrx = DABRX_ALL; | ||
963 | return 0; | 964 | return 0; |
964 | } | 965 | } |
965 | 966 | ||
@@ -983,6 +984,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
983 | 984 | ||
984 | /* Move contents to the DABR register */ | 985 | /* Move contents to the DABR register */ |
985 | task->thread.dabr = data; | 986 | task->thread.dabr = data; |
987 | task->thread.dabrx = DABRX_ALL; | ||
986 | #else /* CONFIG_PPC_ADV_DEBUG_REGS */ | 988 | #else /* CONFIG_PPC_ADV_DEBUG_REGS */ |
987 | /* As described above, it was assumed 3 bits were passed with the data | 989 | /* As described above, it was assumed 3 bits were passed with the data |
988 | * address, but we will assume only the mode bits will be passed | 990 | * address, but we will assume only the mode bits will be passed |
@@ -1397,6 +1399,7 @@ static long ppc_set_hwdebug(struct task_struct *child, | |||
1397 | dabr |= DABR_DATA_WRITE; | 1399 | dabr |= DABR_DATA_WRITE; |
1398 | 1400 | ||
1399 | child->thread.dabr = dabr; | 1401 | child->thread.dabr = dabr; |
1402 | child->thread.dabrx = DABRX_ALL; | ||
1400 | 1403 | ||
1401 | return 1; | 1404 | return 1; |
1402 | #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ | 1405 | #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ |
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 2c0ee6405633..20b0120db0c3 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <asm/delay.h> | 21 | #include <asm/delay.h> |
22 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
23 | #include <asm/rtas.h> | 23 | #include <asm/rtas.h> |
24 | #include <asm/abs_addr.h> | ||
25 | 24 | ||
26 | #define MODULE_VERS "1.0" | 25 | #define MODULE_VERS "1.0" |
27 | #define MODULE_NAME "rtas_flash" | 26 | #define MODULE_NAME "rtas_flash" |
@@ -582,7 +581,7 @@ static void rtas_flash_firmware(int reboot_type) | |||
582 | flist = (struct flash_block_list *)&rtas_data_buf[0]; | 581 | flist = (struct flash_block_list *)&rtas_data_buf[0]; |
583 | flist->num_blocks = 0; | 582 | flist->num_blocks = 0; |
584 | flist->next = rtas_firmware_flash_list; | 583 | flist->next = rtas_firmware_flash_list; |
585 | rtas_block_list = virt_to_abs(flist); | 584 | rtas_block_list = __pa(flist); |
586 | if (rtas_block_list >= 4UL*1024*1024*1024) { | 585 | if (rtas_block_list >= 4UL*1024*1024*1024) { |
587 | printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n"); | 586 | printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n"); |
588 | spin_unlock(&rtas_data_buf_lock); | 587 | spin_unlock(&rtas_data_buf_lock); |
@@ -596,13 +595,13 @@ static void rtas_flash_firmware(int reboot_type) | |||
596 | for (f = flist; f; f = next) { | 595 | for (f = flist; f; f = next) { |
597 | /* Translate data addrs to absolute */ | 596 | /* Translate data addrs to absolute */ |
598 | for (i = 0; i < f->num_blocks; i++) { | 597 | for (i = 0; i < f->num_blocks; i++) { |
599 | f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data); | 598 | f->blocks[i].data = (char *)__pa(f->blocks[i].data); |
600 | image_size += f->blocks[i].length; | 599 | image_size += f->blocks[i].length; |
601 | } | 600 | } |
602 | next = f->next; | 601 | next = f->next; |
603 | /* Don't translate NULL pointer for last entry */ | 602 | /* Don't translate NULL pointer for last entry */ |
604 | if (f->next) | 603 | if (f->next) |
605 | f->next = (struct flash_block_list *)virt_to_abs(f->next); | 604 | f->next = (struct flash_block_list *)__pa(f->next); |
606 | else | 605 | else |
607 | f->next = NULL; | 606 | f->next = NULL; |
608 | /* make num_blocks into the version/length field */ | 607 | /* make num_blocks into the version/length field */ |
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 179af906dcda..6de63e3250bb 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c | |||
@@ -81,7 +81,7 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val) | |||
81 | return PCIBIOS_DEVICE_NOT_FOUND; | 81 | return PCIBIOS_DEVICE_NOT_FOUND; |
82 | 82 | ||
83 | if (returnval == EEH_IO_ERROR_VALUE(size) && | 83 | if (returnval == EEH_IO_ERROR_VALUE(size) && |
84 | eeh_dn_check_failure (pdn->node, NULL)) | 84 | eeh_dev_check_failure(of_node_to_eeh_dev(pdn->node))) |
85 | return PCIBIOS_DEVICE_NOT_FOUND; | 85 | return PCIBIOS_DEVICE_NOT_FOUND; |
86 | 86 | ||
87 | return PCIBIOS_SUCCESSFUL; | 87 | return PCIBIOS_SUCCESSFUL; |
@@ -275,9 +275,6 @@ void __init find_and_init_phbs(void) | |||
275 | of_node_put(root); | 275 | of_node_put(root); |
276 | pci_devs_phb_init(); | 276 | pci_devs_phb_init(); |
277 | 277 | ||
278 | /* Create EEH devices for all PHBs */ | ||
279 | eeh_dev_phb_init(); | ||
280 | |||
281 | /* | 278 | /* |
282 | * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties | 279 | * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties |
283 | * in chosen. | 280 | * in chosen. |
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 5c023c9cf16e..a2dc75793bd5 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/tracehook.h> | 12 | #include <linux/tracehook.h> |
13 | #include <linux/signal.h> | 13 | #include <linux/signal.h> |
14 | #include <linux/uprobes.h> | ||
14 | #include <linux/key.h> | 15 | #include <linux/key.h> |
15 | #include <asm/hw_breakpoint.h> | 16 | #include <asm/hw_breakpoint.h> |
16 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
@@ -130,7 +131,7 @@ static int do_signal(struct pt_regs *regs) | |||
130 | * triggered inside the kernel. | 131 | * triggered inside the kernel. |
131 | */ | 132 | */ |
132 | if (current->thread.dabr) | 133 | if (current->thread.dabr) |
133 | set_dabr(current->thread.dabr); | 134 | set_dabr(current->thread.dabr, current->thread.dabrx); |
134 | #endif | 135 | #endif |
135 | /* Re-enable the breakpoints for the signal stack */ | 136 | /* Re-enable the breakpoints for the signal stack */ |
136 | thread_change_pc(current, regs); | 137 | thread_change_pc(current, regs); |
@@ -157,6 +158,11 @@ static int do_signal(struct pt_regs *regs) | |||
157 | 158 | ||
158 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | 159 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) |
159 | { | 160 | { |
161 | if (thread_info_flags & _TIF_UPROBE) { | ||
162 | clear_thread_flag(TIF_UPROBE); | ||
163 | uprobe_notify_resume(regs); | ||
164 | } | ||
165 | |||
160 | if (thread_info_flags & _TIF_SIGPENDING) | 166 | if (thread_info_flags & _TIF_SIGPENDING) |
161 | do_signal(regs); | 167 | do_signal(regs); |
162 | 168 | ||
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index ae0843fa7a61..32518401af68 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -251,6 +251,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | |||
251 | if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) | 251 | if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) |
252 | local_irq_enable(); | 252 | local_irq_enable(); |
253 | 253 | ||
254 | current->thread.trap_nr = code; | ||
254 | memset(&info, 0, sizeof(info)); | 255 | memset(&info, 0, sizeof(info)); |
255 | info.si_signo = signr; | 256 | info.si_signo = signr; |
256 | info.si_code = code; | 257 | info.si_code = code; |
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c new file mode 100644 index 000000000000..d2d46d1014f8 --- /dev/null +++ b/arch/powerpc/kernel/uprobes.c | |||
@@ -0,0 +1,184 @@ | |||
1 | /* | ||
2 | * User-space Probes (UProbes) for powerpc | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2007-2012 | ||
19 | * | ||
20 | * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com> | ||
21 | */ | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/uprobes.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | #include <linux/kdebug.h> | ||
28 | |||
29 | #include <asm/sstep.h> | ||
30 | |||
31 | #define UPROBE_TRAP_NR UINT_MAX | ||
32 | |||
33 | /** | ||
34 | * arch_uprobe_analyze_insn | ||
35 | * @mm: the probed address space. | ||
36 | * @arch_uprobe: the probepoint information. | ||
37 | * @addr: vaddr to probe. | ||
38 | * Return 0 on success or a -ve number on error. | ||
39 | */ | ||
40 | int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, | ||
41 | struct mm_struct *mm, unsigned long addr) | ||
42 | { | ||
43 | if (addr & 0x03) | ||
44 | return -EINVAL; | ||
45 | |||
46 | /* | ||
47 | * We currently don't support a uprobe on an already | ||
48 | * existing breakpoint instruction underneath | ||
49 | */ | ||
50 | if (is_trap(auprobe->ainsn)) | ||
51 | return -ENOTSUPP; | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * arch_uprobe_pre_xol - prepare to execute out of line. | ||
57 | * @auprobe: the probepoint information. | ||
58 | * @regs: reflects the saved user state of current task. | ||
59 | */ | ||
60 | int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
61 | { | ||
62 | struct arch_uprobe_task *autask = ¤t->utask->autask; | ||
63 | |||
64 | autask->saved_trap_nr = current->thread.trap_nr; | ||
65 | current->thread.trap_nr = UPROBE_TRAP_NR; | ||
66 | regs->nip = current->utask->xol_vaddr; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs | ||
72 | * @regs: Reflects the saved state of the task after it has hit a breakpoint | ||
73 | * instruction. | ||
74 | * Return the address of the breakpoint instruction. | ||
75 | */ | ||
76 | unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) | ||
77 | { | ||
78 | return instruction_pointer(regs); | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc), | ||
83 | * then detect the case where a singlestepped instruction jumps back to its | ||
84 | * own address. It is assumed that anything like do_page_fault/do_trap/etc | ||
85 | * sets thread.trap_nr != UINT_MAX. | ||
86 | * | ||
87 | * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, | ||
88 | * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to | ||
89 | * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol(). | ||
90 | */ | ||
91 | bool arch_uprobe_xol_was_trapped(struct task_struct *t) | ||
92 | { | ||
93 | if (t->thread.trap_nr != UPROBE_TRAP_NR) | ||
94 | return true; | ||
95 | |||
96 | return false; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Called after single-stepping. To avoid the SMP problems that can | ||
101 | * occur when we temporarily put back the original opcode to | ||
102 | * single-step, we single-stepped a copy of the instruction. | ||
103 | * | ||
104 | * This function prepares to resume execution after the single-step. | ||
105 | */ | ||
106 | int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
107 | { | ||
108 | struct uprobe_task *utask = current->utask; | ||
109 | |||
110 | WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); | ||
111 | |||
112 | current->thread.trap_nr = utask->autask.saved_trap_nr; | ||
113 | |||
114 | /* | ||
115 | * On powerpc, except for loads and stores, most instructions | ||
116 | * including ones that alter code flow (branches, calls, returns) | ||
117 | * are emulated in the kernel. We get here only if the emulation | ||
118 | * support doesn't exist and have to fix-up the next instruction | ||
119 | * to be executed. | ||
120 | */ | ||
121 | regs->nip = utask->vaddr + MAX_UINSN_BYTES; | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | /* callback routine for handling exceptions. */ | ||
126 | int arch_uprobe_exception_notify(struct notifier_block *self, | ||
127 | unsigned long val, void *data) | ||
128 | { | ||
129 | struct die_args *args = data; | ||
130 | struct pt_regs *regs = args->regs; | ||
131 | |||
132 | /* regs == NULL is a kernel bug */ | ||
133 | if (WARN_ON(!regs)) | ||
134 | return NOTIFY_DONE; | ||
135 | |||
136 | /* We are only interested in userspace traps */ | ||
137 | if (!user_mode(regs)) | ||
138 | return NOTIFY_DONE; | ||
139 | |||
140 | switch (val) { | ||
141 | case DIE_BPT: | ||
142 | if (uprobe_pre_sstep_notifier(regs)) | ||
143 | return NOTIFY_STOP; | ||
144 | break; | ||
145 | case DIE_SSTEP: | ||
146 | if (uprobe_post_sstep_notifier(regs)) | ||
147 | return NOTIFY_STOP; | ||
148 | default: | ||
149 | break; | ||
150 | } | ||
151 | return NOTIFY_DONE; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * This function gets called when XOL instruction either gets trapped or | ||
156 | * the thread has a fatal signal, so reset the instruction pointer to its | ||
157 | * probed address. | ||
158 | */ | ||
159 | void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
160 | { | ||
161 | struct uprobe_task *utask = current->utask; | ||
162 | |||
163 | current->thread.trap_nr = utask->autask.saved_trap_nr; | ||
164 | instruction_pointer_set(regs, utask->vaddr); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * See if the instruction can be emulated. | ||
169 | * Returns true if instruction was emulated, false otherwise. | ||
170 | */ | ||
171 | bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
172 | { | ||
173 | int ret; | ||
174 | |||
175 | /* | ||
176 | * emulate_step() returns 1 if the insn was successfully emulated. | ||
177 | * For all other cases, we need to single-step in hardware. | ||
178 | */ | ||
179 | ret = emulate_step(regs, auprobe->ainsn); | ||
180 | if (ret > 0) | ||
181 | return true; | ||
182 | |||
183 | return false; | ||
184 | } | ||
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index b67db22e102d..1b2076f049ce 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -723,9 +723,7 @@ int __cpuinit vdso_getcpu_init(void) | |||
723 | 723 | ||
724 | val = (cpu & 0xfff) | ((node & 0xffff) << 16); | 724 | val = (cpu & 0xfff) | ((node & 0xffff) << 16); |
725 | mtspr(SPRN_SPRG3, val); | 725 | mtspr(SPRN_SPRG3, val); |
726 | #ifdef CONFIG_KVM_BOOK3S_HANDLER | 726 | get_paca()->sprg3 = val; |
727 | get_paca()->kvm_hstate.sprg3 = val; | ||
728 | #endif | ||
729 | 727 | ||
730 | put_cpu(); | 728 | put_cpu(); |
731 | 729 | ||
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 02b32216bbc3..201ba59738be 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <asm/prom.h> | 33 | #include <asm/prom.h> |
34 | #include <asm/firmware.h> | 34 | #include <asm/firmware.h> |
35 | #include <asm/tce.h> | 35 | #include <asm/tce.h> |
36 | #include <asm/abs_addr.h> | ||
37 | #include <asm/page.h> | 36 | #include <asm/page.h> |
38 | #include <asm/hvcall.h> | 37 | #include <asm/hvcall.h> |
39 | 38 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 44b72feaff7d..74a24bbb9637 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1065,7 +1065,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
1065 | mtspr SPRN_DABRX,r6 | 1065 | mtspr SPRN_DABRX,r6 |
1066 | 1066 | ||
1067 | /* Restore SPRG3 */ | 1067 | /* Restore SPRG3 */ |
1068 | ld r3,HSTATE_SPRG3(r13) | 1068 | ld r3,PACA_SPRG3(r13) |
1069 | mtspr SPRN_SPRG3,r3 | 1069 | mtspr SPRN_SPRG3,r3 |
1070 | 1070 | ||
1071 | /* | 1071 | /* |
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 08ffcf52a856..995f924e007f 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -133,6 +133,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address) | |||
133 | up_read(¤t->mm->mmap_sem); | 133 | up_read(¤t->mm->mmap_sem); |
134 | 134 | ||
135 | if (user_mode(regs)) { | 135 | if (user_mode(regs)) { |
136 | current->thread.trap_nr = BUS_ADRERR; | ||
136 | info.si_signo = SIGBUS; | 137 | info.si_signo = SIGBUS; |
137 | info.si_errno = 0; | 138 | info.si_errno = 0; |
138 | info.si_code = BUS_ADRERR; | 139 | info.si_code = BUS_ADRERR; |
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 90039bc64119..f21e8ce8db33 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
@@ -14,10 +14,10 @@ | |||
14 | 14 | ||
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/of.h> | ||
17 | #include <linux/threads.h> | 18 | #include <linux/threads.h> |
18 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
19 | 20 | ||
20 | #include <asm/abs_addr.h> | ||
21 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
22 | #include <asm/mmu.h> | 22 | #include <asm/mmu.h> |
23 | #include <asm/mmu_context.h> | 23 | #include <asm/mmu_context.h> |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 377e5cbedbbb..ba45739bdfe8 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
44 | #include <asm/machdep.h> | 44 | #include <asm/machdep.h> |
45 | #include <asm/prom.h> | 45 | #include <asm/prom.h> |
46 | #include <asm/abs_addr.h> | ||
47 | #include <asm/tlbflush.h> | 46 | #include <asm/tlbflush.h> |
48 | #include <asm/io.h> | 47 | #include <asm/io.h> |
49 | #include <asm/eeh.h> | 48 | #include <asm/eeh.h> |
@@ -651,7 +650,7 @@ static void __init htab_initialize(void) | |||
651 | DBG("Hash table allocated at %lx, size: %lx\n", table, | 650 | DBG("Hash table allocated at %lx, size: %lx\n", table, |
652 | htab_size_bytes); | 651 | htab_size_bytes); |
653 | 652 | ||
654 | htab_address = abs_to_virt(table); | 653 | htab_address = __va(table); |
655 | 654 | ||
656 | /* htab absolute addr + encoded htabsize */ | 655 | /* htab absolute addr + encoded htabsize */ |
657 | _SDR1 = table + __ilog2(pteg_count) - 11; | 656 | _SDR1 = table + __ilog2(pteg_count) - 11; |
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 620b7acd2fdf..95a45293e5ac 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
@@ -62,7 +62,6 @@ | |||
62 | #include <asm/cputable.h> | 62 | #include <asm/cputable.h> |
63 | #include <asm/sections.h> | 63 | #include <asm/sections.h> |
64 | #include <asm/iommu.h> | 64 | #include <asm/iommu.h> |
65 | #include <asm/abs_addr.h> | ||
66 | #include <asm/vdso.h> | 65 | #include <asm/vdso.h> |
67 | 66 | ||
68 | #include "mmu_decl.h" | 67 | #include "mmu_decl.h" |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index fbdad0e3929a..44cf2b20503d 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -62,7 +62,7 @@ | |||
62 | 62 | ||
63 | int init_bootmem_done; | 63 | int init_bootmem_done; |
64 | int mem_init_done; | 64 | int mem_init_done; |
65 | phys_addr_t memory_limit; | 65 | unsigned long long memory_limit; |
66 | 66 | ||
67 | #ifdef CONFIG_HIGHMEM | 67 | #ifdef CONFIG_HIGHMEM |
68 | pte_t *kmap_pte; | 68 | pte_t *kmap_pte; |
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 249a0631c4db..297d49547ea8 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
@@ -51,7 +51,6 @@ | |||
51 | #include <asm/processor.h> | 51 | #include <asm/processor.h> |
52 | #include <asm/cputable.h> | 52 | #include <asm/cputable.h> |
53 | #include <asm/sections.h> | 53 | #include <asm/sections.h> |
54 | #include <asm/abs_addr.h> | ||
55 | #include <asm/firmware.h> | 54 | #include <asm/firmware.h> |
56 | 55 | ||
57 | #include "mmu_decl.h" | 56 | #include "mmu_decl.h" |
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 9106ebb118f5..3f8efa6f2997 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <asm/paca.h> | 20 | #include <asm/paca.h> |
21 | #include <asm/cputable.h> | 21 | #include <asm/cputable.h> |
22 | #include <asm/prom.h> | 22 | #include <asm/prom.h> |
23 | #include <asm/abs_addr.h> | ||
24 | 23 | ||
25 | struct stab_entry { | 24 | struct stab_entry { |
26 | unsigned long esid_data; | 25 | unsigned long esid_data; |
@@ -257,7 +256,7 @@ void __init stabs_alloc(void) | |||
257 | memset((void *)newstab, 0, HW_PAGE_SIZE); | 256 | memset((void *)newstab, 0, HW_PAGE_SIZE); |
258 | 257 | ||
259 | paca[cpu].stab_addr = newstab; | 258 | paca[cpu].stab_addr = newstab; |
260 | paca[cpu].stab_real = virt_to_abs(newstab); | 259 | paca[cpu].stab_real = __pa(newstab); |
261 | printk(KERN_INFO "Segment table for CPU %d at 0x%llx " | 260 | printk(KERN_INFO "Segment table for CPU %d at 0x%llx " |
262 | "virtual, 0x%llx absolute\n", | 261 | "virtual, 0x%llx absolute\n", |
263 | cpu, paca[cpu].stab_addr, paca[cpu].stab_real); | 262 | cpu, paca[cpu].stab_addr, paca[cpu].stab_real); |
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index e4f8f1fc81a5..7c415ddde948 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c | |||
@@ -95,7 +95,8 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len) | |||
95 | struct mm_struct *mm = current->mm; | 95 | struct mm_struct *mm = current->mm; |
96 | struct subpage_prot_table *spt = &mm->context.spt; | 96 | struct subpage_prot_table *spt = &mm->context.spt; |
97 | u32 **spm, *spp; | 97 | u32 **spm, *spp; |
98 | int i, nw; | 98 | unsigned long i; |
99 | size_t nw; | ||
99 | unsigned long next, limit; | 100 | unsigned long next, limit; |
100 | 101 | ||
101 | down_write(&mm->mmap_sem); | 102 | down_write(&mm->mmap_sem); |
@@ -144,7 +145,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) | |||
144 | struct mm_struct *mm = current->mm; | 145 | struct mm_struct *mm = current->mm; |
145 | struct subpage_prot_table *spt = &mm->context.spt; | 146 | struct subpage_prot_table *spt = &mm->context.spt; |
146 | u32 **spm, *spp; | 147 | u32 **spm, *spp; |
147 | int i, nw; | 148 | unsigned long i; |
149 | size_t nw; | ||
148 | unsigned long next, limit; | 150 | unsigned long next, limit; |
149 | int err; | 151 | int err; |
150 | 152 | ||
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index f09d48e3268d..b4113bf86353 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <asm/pgtable.h> | 20 | #include <asm/pgtable.h> |
21 | #include <asm/exception-64e.h> | 21 | #include <asm/exception-64e.h> |
22 | #include <asm/ppc-opcode.h> | 22 | #include <asm/ppc-opcode.h> |
23 | #include <asm/kvm_asm.h> | ||
24 | #include <asm/kvm_booke_hv_asm.h> | ||
23 | 25 | ||
24 | #ifdef CONFIG_PPC_64K_PAGES | 26 | #ifdef CONFIG_PPC_64K_PAGES |
25 | #define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1) | 27 | #define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1) |
@@ -37,12 +39,18 @@ | |||
37 | * * | 39 | * * |
38 | **********************************************************************/ | 40 | **********************************************************************/ |
39 | 41 | ||
40 | .macro tlb_prolog_bolted addr | 42 | .macro tlb_prolog_bolted intnum addr |
41 | mtspr SPRN_SPRG_TLB_SCRATCH,r13 | 43 | mtspr SPRN_SPRG_GEN_SCRATCH,r13 |
42 | mfspr r13,SPRN_SPRG_PACA | 44 | mfspr r13,SPRN_SPRG_PACA |
43 | std r10,PACA_EXTLB+EX_TLB_R10(r13) | 45 | std r10,PACA_EXTLB+EX_TLB_R10(r13) |
44 | mfcr r10 | 46 | mfcr r10 |
45 | std r11,PACA_EXTLB+EX_TLB_R11(r13) | 47 | std r11,PACA_EXTLB+EX_TLB_R11(r13) |
48 | #ifdef CONFIG_KVM_BOOKE_HV | ||
49 | BEGIN_FTR_SECTION | ||
50 | mfspr r11, SPRN_SRR1 | ||
51 | END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) | ||
52 | #endif | ||
53 | DO_KVM \intnum, SPRN_SRR1 | ||
46 | std r16,PACA_EXTLB+EX_TLB_R16(r13) | 54 | std r16,PACA_EXTLB+EX_TLB_R16(r13) |
47 | mfspr r16,\addr /* get faulting address */ | 55 | mfspr r16,\addr /* get faulting address */ |
48 | std r14,PACA_EXTLB+EX_TLB_R14(r13) | 56 | std r14,PACA_EXTLB+EX_TLB_R14(r13) |
@@ -61,12 +69,12 @@ | |||
61 | ld r15,PACA_EXTLB+EX_TLB_R15(r13) | 69 | ld r15,PACA_EXTLB+EX_TLB_R15(r13) |
62 | TLB_MISS_RESTORE_STATS_BOLTED | 70 | TLB_MISS_RESTORE_STATS_BOLTED |
63 | ld r16,PACA_EXTLB+EX_TLB_R16(r13) | 71 | ld r16,PACA_EXTLB+EX_TLB_R16(r13) |
64 | mfspr r13,SPRN_SPRG_TLB_SCRATCH | 72 | mfspr r13,SPRN_SPRG_GEN_SCRATCH |
65 | .endm | 73 | .endm |
66 | 74 | ||
67 | /* Data TLB miss */ | 75 | /* Data TLB miss */ |
68 | START_EXCEPTION(data_tlb_miss_bolted) | 76 | START_EXCEPTION(data_tlb_miss_bolted) |
69 | tlb_prolog_bolted SPRN_DEAR | 77 | tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR |
70 | 78 | ||
71 | /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ | 79 | /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ |
72 | 80 | ||
@@ -214,7 +222,7 @@ itlb_miss_fault_bolted: | |||
214 | 222 | ||
215 | /* Instruction TLB miss */ | 223 | /* Instruction TLB miss */ |
216 | START_EXCEPTION(instruction_tlb_miss_bolted) | 224 | START_EXCEPTION(instruction_tlb_miss_bolted) |
217 | tlb_prolog_bolted SPRN_SRR0 | 225 | tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0 |
218 | 226 | ||
219 | rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 | 227 | rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 |
220 | srdi r15,r16,60 /* get region */ | 228 | srdi r15,r16,60 /* get region */ |
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 95ae77dec3f6..315f9495e9b2 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c | |||
@@ -21,6 +21,13 @@ | |||
21 | #include <asm/reg.h> | 21 | #include <asm/reg.h> |
22 | 22 | ||
23 | #define dbg(args...) | 23 | #define dbg(args...) |
24 | #define OPROFILE_PM_PMCSEL_MSK 0xffULL | ||
25 | #define OPROFILE_PM_UNIT_SHIFT 60 | ||
26 | #define OPROFILE_PM_UNIT_MSK 0xfULL | ||
27 | #define OPROFILE_MAX_PMC_NUM 3 | ||
28 | #define OPROFILE_PMSEL_FIELD_WIDTH 8 | ||
29 | #define OPROFILE_UNIT_FIELD_WIDTH 4 | ||
30 | #define MMCRA_SIAR_VALID_MASK 0x10000000ULL | ||
24 | 31 | ||
25 | static unsigned long reset_value[OP_MAX_COUNTER]; | 32 | static unsigned long reset_value[OP_MAX_COUNTER]; |
26 | 33 | ||
@@ -31,6 +38,61 @@ static int use_slot_nums; | |||
31 | static u32 mmcr0_val; | 38 | static u32 mmcr0_val; |
32 | static u64 mmcr1_val; | 39 | static u64 mmcr1_val; |
33 | static u64 mmcra_val; | 40 | static u64 mmcra_val; |
41 | static u32 cntr_marked_events; | ||
42 | |||
43 | static int power7_marked_instr_event(u64 mmcr1) | ||
44 | { | ||
45 | u64 psel, unit; | ||
46 | int pmc, cntr_marked_events = 0; | ||
47 | |||
48 | /* Given the MMCR1 value, look at the field for each counter to | ||
49 | * determine if it is a marked event. Code based on the function | ||
50 | * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c. | ||
51 | */ | ||
52 | for (pmc = 0; pmc < 4; pmc++) { | ||
53 | psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK | ||
54 | << (OPROFILE_MAX_PMC_NUM - pmc) | ||
55 | * OPROFILE_MAX_PMC_NUM); | ||
56 | psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc) | ||
57 | * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL; | ||
58 | unit = mmcr1 & (OPROFILE_PM_UNIT_MSK | ||
59 | << (OPROFILE_PM_UNIT_SHIFT | ||
60 | - (pmc * OPROFILE_PMSEL_FIELD_WIDTH ))); | ||
61 | unit = unit >> (OPROFILE_PM_UNIT_SHIFT | ||
62 | - (pmc * OPROFILE_PMSEL_FIELD_WIDTH)); | ||
63 | |||
64 | switch (psel >> 4) { | ||
65 | case 2: | ||
66 | cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc; | ||
67 | break; | ||
68 | case 3: | ||
69 | if (psel == 0x3c) { | ||
70 | cntr_marked_events |= (pmc == 0) << pmc; | ||
71 | break; | ||
72 | } | ||
73 | |||
74 | if (psel == 0x3e) { | ||
75 | cntr_marked_events |= (pmc != 1) << pmc; | ||
76 | break; | ||
77 | } | ||
78 | |||
79 | cntr_marked_events |= 1 << pmc; | ||
80 | break; | ||
81 | case 4: | ||
82 | case 5: | ||
83 | cntr_marked_events |= (unit == 0xd) << pmc; | ||
84 | break; | ||
85 | case 6: | ||
86 | if (psel == 0x64) | ||
87 | cntr_marked_events |= (pmc >= 2) << pmc; | ||
88 | break; | ||
89 | case 8: | ||
90 | cntr_marked_events |= (unit == 0xd) << pmc; | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | return cntr_marked_events; | ||
95 | } | ||
34 | 96 | ||
35 | static int power4_reg_setup(struct op_counter_config *ctr, | 97 | static int power4_reg_setup(struct op_counter_config *ctr, |
36 | struct op_system_config *sys, | 98 | struct op_system_config *sys, |
@@ -47,6 +109,23 @@ static int power4_reg_setup(struct op_counter_config *ctr, | |||
47 | mmcr1_val = sys->mmcr1; | 109 | mmcr1_val = sys->mmcr1; |
48 | mmcra_val = sys->mmcra; | 110 | mmcra_val = sys->mmcra; |
49 | 111 | ||
112 | /* Power 7+ and newer architectures: | ||
113 | * Determine which counter events in the group (the group of events is | ||
114 | * specified by the bit settings in the MMCR1 register) are marked | ||
115 | * events for use in the interrupt handler. Do the calculation once | ||
116 | * before OProfile starts. Information is used in the interrupt | ||
117 | * handler. Starting with Power 7+ we only record the sample for | ||
118 | * marked events if the SIAR valid bit is set. For non marked events | ||
119 | * the sample is always recorded. | ||
120 | */ | ||
121 | if (pvr_version_is(PVR_POWER7p)) | ||
122 | cntr_marked_events = power7_marked_instr_event(mmcr1_val); | ||
123 | else | ||
124 | cntr_marked_events = 0; /* For older processors, set the bit map | ||
125 | * to zero so the sample will always be | ||
126 | * be recorded. | ||
127 | */ | ||
128 | |||
50 | for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) | 129 | for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) |
51 | reset_value[i] = 0x80000000UL - ctr[i].count; | 130 | reset_value[i] = 0x80000000UL - ctr[i].count; |
52 | 131 | ||
@@ -61,10 +140,10 @@ static int power4_reg_setup(struct op_counter_config *ctr, | |||
61 | else | 140 | else |
62 | mmcr0_val |= MMCR0_PROBLEM_DISABLE; | 141 | mmcr0_val |= MMCR0_PROBLEM_DISABLE; |
63 | 142 | ||
64 | if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) || | 143 | if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) || |
65 | __is_processor(PV_970) || __is_processor(PV_970FX) || | 144 | pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) || |
66 | __is_processor(PV_970MP) || __is_processor(PV_970GX) || | 145 | pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) || |
67 | __is_processor(PV_POWER5) || __is_processor(PV_POWER5p)) | 146 | pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p)) |
68 | use_slot_nums = 1; | 147 | use_slot_nums = 1; |
69 | 148 | ||
70 | return 0; | 149 | return 0; |
@@ -84,9 +163,9 @@ extern void ppc_enable_pmcs(void); | |||
84 | */ | 163 | */ |
85 | static inline int mmcra_must_set_sample(void) | 164 | static inline int mmcra_must_set_sample(void) |
86 | { | 165 | { |
87 | if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) || | 166 | if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) || |
88 | __is_processor(PV_970) || __is_processor(PV_970FX) || | 167 | pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) || |
89 | __is_processor(PV_970MP) || __is_processor(PV_970GX)) | 168 | pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX)) |
90 | return 1; | 169 | return 1; |
91 | 170 | ||
92 | return 0; | 171 | return 0; |
@@ -276,7 +355,7 @@ static bool pmc_overflow(unsigned long val) | |||
276 | * PMCs because a user might set a period of less than 256 and we | 355 | * PMCs because a user might set a period of less than 256 and we |
277 | * don't want to mistakenly reset them. | 356 | * don't want to mistakenly reset them. |
278 | */ | 357 | */ |
279 | if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) | 358 | if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256)) |
280 | return true; | 359 | return true; |
281 | 360 | ||
282 | return false; | 361 | return false; |
@@ -291,6 +370,7 @@ static void power4_handle_interrupt(struct pt_regs *regs, | |||
291 | int i; | 370 | int i; |
292 | unsigned int mmcr0; | 371 | unsigned int mmcr0; |
293 | unsigned long mmcra; | 372 | unsigned long mmcra; |
373 | bool siar_valid = false; | ||
294 | 374 | ||
295 | mmcra = mfspr(SPRN_MMCRA); | 375 | mmcra = mfspr(SPRN_MMCRA); |
296 | 376 | ||
@@ -300,11 +380,29 @@ static void power4_handle_interrupt(struct pt_regs *regs, | |||
300 | /* set the PMM bit (see comment below) */ | 380 | /* set the PMM bit (see comment below) */ |
301 | mtmsrd(mfmsr() | MSR_PMM); | 381 | mtmsrd(mfmsr() | MSR_PMM); |
302 | 382 | ||
383 | /* Check that the SIAR valid bit in MMCRA is set to 1. */ | ||
384 | if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK) | ||
385 | siar_valid = true; | ||
386 | |||
303 | for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { | 387 | for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { |
304 | val = classic_ctr_read(i); | 388 | val = classic_ctr_read(i); |
305 | if (pmc_overflow(val)) { | 389 | if (pmc_overflow(val)) { |
306 | if (oprofile_running && ctr[i].enabled) { | 390 | if (oprofile_running && ctr[i].enabled) { |
307 | oprofile_add_ext_sample(pc, regs, i, is_kernel); | 391 | /* Power 7+ and newer architectures: |
392 | * If the event is a marked event, then only | ||
393 | * save the sample if the SIAR valid bit is | ||
394 | * set. If the event is not marked, then | ||
395 | * always save the sample. | ||
396 | * Note, the Sample enable bit in the MMCRA | ||
397 | * register must be set to 1 if the group | ||
398 | * contains a marked event. | ||
399 | */ | ||
400 | if ((siar_valid && | ||
401 | (cntr_marked_events & (1 << i))) | ||
402 | || !(cntr_marked_events & (1 << i))) | ||
403 | oprofile_add_ext_sample(pc, regs, i, | ||
404 | is_kernel); | ||
405 | |||
308 | classic_ctr_write(i, reset_value[i]); | 406 | classic_ctr_write(i, reset_value[i]); |
309 | } else { | 407 | } else { |
310 | classic_ctr_write(i, 0); | 408 | classic_ctr_write(i, 0); |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 7cd2dbd6e4c4..fb55da91aa45 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -1396,7 +1396,7 @@ static bool pmc_overflow(unsigned long val) | |||
1396 | * PMCs because a user might set a period of less than 256 and we | 1396 | * PMCs because a user might set a period of less than 256 and we |
1397 | * don't want to mistakenly reset them. | 1397 | * don't want to mistakenly reset them. |
1398 | */ | 1398 | */ |
1399 | if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) | 1399 | if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256)) |
1400 | return true; | 1400 | return true; |
1401 | 1401 | ||
1402 | return false; | 1402 | return false; |
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c index 852592b2b712..affcf566d460 100644 --- a/arch/powerpc/platforms/cell/beat.c +++ b/arch/powerpc/platforms/cell/beat.c | |||
@@ -136,9 +136,9 @@ ssize_t beat_nvram_get_size(void) | |||
136 | return BEAT_NVRAM_SIZE; | 136 | return BEAT_NVRAM_SIZE; |
137 | } | 137 | } |
138 | 138 | ||
139 | int beat_set_xdabr(unsigned long dabr) | 139 | int beat_set_xdabr(unsigned long dabr, unsigned long dabrx) |
140 | { | 140 | { |
141 | if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER)) | 141 | if (beat_set_dabr(dabr, dabrx)) |
142 | return -1; | 142 | return -1; |
143 | return 0; | 143 | return 0; |
144 | } | 144 | } |
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h index 32c8efcedc80..bfcb8e351ae5 100644 --- a/arch/powerpc/platforms/cell/beat.h +++ b/arch/powerpc/platforms/cell/beat.h | |||
@@ -32,7 +32,7 @@ void beat_get_rtc_time(struct rtc_time *); | |||
32 | ssize_t beat_nvram_get_size(void); | 32 | ssize_t beat_nvram_get_size(void); |
33 | ssize_t beat_nvram_read(char *, size_t, loff_t *); | 33 | ssize_t beat_nvram_read(char *, size_t, loff_t *); |
34 | ssize_t beat_nvram_write(char *, size_t, loff_t *); | 34 | ssize_t beat_nvram_write(char *, size_t, loff_t *); |
35 | int beat_set_xdabr(unsigned long); | 35 | int beat_set_xdabr(unsigned long, unsigned long); |
36 | void beat_power_save(void); | 36 | void beat_power_save(void); |
37 | void beat_kexec_cpu_down(int, int); | 37 | void beat_kexec_cpu_down(int, int); |
38 | 38 | ||
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 14943ef01918..7d2d036754b5 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c | |||
@@ -19,12 +19,12 @@ | |||
19 | 19 | ||
20 | #undef DEBUG | 20 | #undef DEBUG |
21 | 21 | ||
22 | #include <linux/memblock.h> | ||
22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
23 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
24 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
25 | #include <asm/iommu.h> | 26 | #include <asm/iommu.h> |
26 | #include <asm/machdep.h> | 27 | #include <asm/machdep.h> |
27 | #include <asm/abs_addr.h> | ||
28 | #include <asm/firmware.h> | 28 | #include <asm/firmware.h> |
29 | 29 | ||
30 | #define IOBMAP_PAGE_SHIFT 12 | 30 | #define IOBMAP_PAGE_SHIFT 12 |
@@ -99,7 +99,7 @@ static int iobmap_build(struct iommu_table *tbl, long index, | |||
99 | ip = ((u32 *)tbl->it_base) + index; | 99 | ip = ((u32 *)tbl->it_base) + index; |
100 | 100 | ||
101 | while (npages--) { | 101 | while (npages--) { |
102 | rpn = virt_to_abs(uaddr) >> IOBMAP_PAGE_SHIFT; | 102 | rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT; |
103 | 103 | ||
104 | *(ip++) = IOBMAP_L2E_V | rpn; | 104 | *(ip++) = IOBMAP_L2E_V | rpn; |
105 | /* invalidate tlb, can be optimized more */ | 105 | /* invalidate tlb, can be optimized more */ |
@@ -258,7 +258,7 @@ void __init alloc_iobmap_l2(void) | |||
258 | return; | 258 | return; |
259 | #endif | 259 | #endif |
260 | /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */ | 260 | /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */ |
261 | iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); | 261 | iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); |
262 | 262 | ||
263 | printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base); | 263 | printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base); |
264 | } | 264 | } |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 0e7eccc0f88d..cae7281e4e66 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <asm/opal.h> | 30 | #include <asm/opal.h> |
31 | #include <asm/iommu.h> | 31 | #include <asm/iommu.h> |
32 | #include <asm/tce.h> | 32 | #include <asm/tce.h> |
33 | #include <asm/abs_addr.h> | ||
34 | 33 | ||
35 | #include "powernv.h" | 34 | #include "powernv.h" |
36 | #include "pci.h" | 35 | #include "pci.h" |
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c index 264967770c3a..6b4bef4e9d82 100644 --- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c +++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <asm/opal.h> | 30 | #include <asm/opal.h> |
31 | #include <asm/iommu.h> | 31 | #include <asm/iommu.h> |
32 | #include <asm/tce.h> | 32 | #include <asm/tce.h> |
33 | #include <asm/abs_addr.h> | ||
34 | 33 | ||
35 | #include "powernv.h" | 34 | #include "powernv.h" |
36 | #include "pci.h" | 35 | #include "pci.h" |
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index be3cfc5ceabb..c01688a1a741 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <asm/opal.h> | 30 | #include <asm/opal.h> |
31 | #include <asm/iommu.h> | 31 | #include <asm/iommu.h> |
32 | #include <asm/tce.h> | 32 | #include <asm/tce.h> |
33 | #include <asm/abs_addr.h> | ||
34 | #include <asm/firmware.h> | 33 | #include <asm/firmware.h> |
35 | 34 | ||
36 | #include "powernv.h" | 35 | #include "powernv.h" |
@@ -447,6 +446,11 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages) | |||
447 | pnv_tce_invalidate(tbl, tces, tcep - 1); | 446 | pnv_tce_invalidate(tbl, tces, tcep - 1); |
448 | } | 447 | } |
449 | 448 | ||
449 | static unsigned long pnv_tce_get(struct iommu_table *tbl, long index) | ||
450 | { | ||
451 | return ((u64 *)tbl->it_base)[index - tbl->it_offset]; | ||
452 | } | ||
453 | |||
450 | void pnv_pci_setup_iommu_table(struct iommu_table *tbl, | 454 | void pnv_pci_setup_iommu_table(struct iommu_table *tbl, |
451 | void *tce_mem, u64 tce_size, | 455 | void *tce_mem, u64 tce_size, |
452 | u64 dma_offset) | 456 | u64 dma_offset) |
@@ -597,6 +601,7 @@ void __init pnv_pci_init(void) | |||
597 | ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup; | 601 | ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup; |
598 | ppc_md.tce_build = pnv_tce_build; | 602 | ppc_md.tce_build = pnv_tce_build; |
599 | ppc_md.tce_free = pnv_tce_free; | 603 | ppc_md.tce_free = pnv_tce_free; |
604 | ppc_md.tce_get = pnv_tce_get; | ||
600 | ppc_md.pci_probe_mode = pnv_pci_probe_mode; | 605 | ppc_md.pci_probe_mode = pnv_pci_probe_mode; |
601 | set_pci_dma_ops(&dma_iommu_ops); | 606 | set_pci_dma_ops(&dma_iommu_ops); |
602 | 607 | ||
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 2d664c5a83b0..3f509f86432c 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c | |||
@@ -184,11 +184,15 @@ early_param("ps3flash", early_parse_ps3flash); | |||
184 | #define prealloc_ps3flash_bounce_buffer() do { } while (0) | 184 | #define prealloc_ps3flash_bounce_buffer() do { } while (0) |
185 | #endif | 185 | #endif |
186 | 186 | ||
187 | static int ps3_set_dabr(unsigned long dabr) | 187 | static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx) |
188 | { | 188 | { |
189 | enum {DABR_USER = 1, DABR_KERNEL = 2,}; | 189 | /* Have to set at least one bit in the DABRX */ |
190 | if (dabrx == 0 && dabr == 0) | ||
191 | dabrx = DABRX_USER; | ||
192 | /* hypervisor only allows us to set BTI, Kernel and user */ | ||
193 | dabrx &= DABRX_BTI | DABRX_KERNEL | DABRX_USER; | ||
190 | 194 | ||
191 | return lv1_set_dabr(dabr, DABR_KERNEL | DABR_USER) ? -1 : 0; | 195 | return lv1_set_dabr(dabr, dabrx) ? -1 : 0; |
192 | } | 196 | } |
193 | 197 | ||
194 | static void __init ps3_setup_arch(void) | 198 | static void __init ps3_setup_arch(void) |
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index c222189f5bb2..890622b87c8f 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile | |||
@@ -6,8 +6,9 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \ | |||
6 | firmware.o power.o dlpar.o mobility.o | 6 | firmware.o power.o dlpar.o mobility.o |
7 | obj-$(CONFIG_SMP) += smp.o | 7 | obj-$(CONFIG_SMP) += smp.o |
8 | obj-$(CONFIG_SCANLOG) += scanlog.o | 8 | obj-$(CONFIG_SCANLOG) += scanlog.o |
9 | obj-$(CONFIG_EEH) += eeh.o eeh_dev.o eeh_cache.o eeh_driver.o \ | 9 | obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \ |
10 | eeh_event.o eeh_sysfs.o eeh_pseries.o | 10 | eeh_driver.o eeh_event.o eeh_sysfs.o \ |
11 | eeh_pseries.o | ||
11 | obj-$(CONFIG_KEXEC) += kexec.o | 12 | obj-$(CONFIG_KEXEC) += kexec.o |
12 | obj-$(CONFIG_PCI) += pci.o pci_dlpar.o | 13 | obj-$(CONFIG_PCI) += pci.o pci_dlpar.o |
13 | obj-$(CONFIG_PSERIES_MSI) += msi.o | 14 | obj-$(CONFIG_PSERIES_MSI) += msi.o |
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index ecd394cf34e6..18c168b752da 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c | |||
@@ -92,6 +92,20 @@ struct eeh_ops *eeh_ops = NULL; | |||
92 | int eeh_subsystem_enabled; | 92 | int eeh_subsystem_enabled; |
93 | EXPORT_SYMBOL(eeh_subsystem_enabled); | 93 | EXPORT_SYMBOL(eeh_subsystem_enabled); |
94 | 94 | ||
95 | /* | ||
96 | * EEH probe mode support. The intention is to support multiple | ||
97 | * platforms for EEH. Some platforms like pSeries do PCI emunation | ||
98 | * based on device tree. However, other platforms like powernv probe | ||
99 | * PCI devices from hardware. The flag is used to distinguish that. | ||
100 | * In addition, struct eeh_ops::probe would be invoked for particular | ||
101 | * OF node or PCI device so that the corresponding PE would be created | ||
102 | * there. | ||
103 | */ | ||
104 | int eeh_probe_mode; | ||
105 | |||
106 | /* Global EEH mutex */ | ||
107 | DEFINE_MUTEX(eeh_mutex); | ||
108 | |||
95 | /* Lock to avoid races due to multiple reports of an error */ | 109 | /* Lock to avoid races due to multiple reports of an error */ |
96 | static DEFINE_RAW_SPINLOCK(confirm_error_lock); | 110 | static DEFINE_RAW_SPINLOCK(confirm_error_lock); |
97 | 111 | ||
@@ -204,22 +218,12 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len) | |||
204 | } | 218 | } |
205 | } | 219 | } |
206 | 220 | ||
207 | /* Gather status on devices under the bridge */ | ||
208 | if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) { | ||
209 | struct device_node *child; | ||
210 | |||
211 | for_each_child_of_node(dn, child) { | ||
212 | if (of_node_to_eeh_dev(child)) | ||
213 | n += eeh_gather_pci_data(of_node_to_eeh_dev(child), buf+n, len-n); | ||
214 | } | ||
215 | } | ||
216 | |||
217 | return n; | 221 | return n; |
218 | } | 222 | } |
219 | 223 | ||
220 | /** | 224 | /** |
221 | * eeh_slot_error_detail - Generate combined log including driver log and error log | 225 | * eeh_slot_error_detail - Generate combined log including driver log and error log |
222 | * @edev: device to report error log for | 226 | * @pe: EEH PE |
223 | * @severity: temporary or permanent error log | 227 | * @severity: temporary or permanent error log |
224 | * | 228 | * |
225 | * This routine should be called to generate the combined log, which | 229 | * This routine should be called to generate the combined log, which |
@@ -227,17 +231,22 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len) | |||
227 | * out from the config space of the corresponding PCI device, while | 231 | * out from the config space of the corresponding PCI device, while |
228 | * the error log is fetched through platform dependent function call. | 232 | * the error log is fetched through platform dependent function call. |
229 | */ | 233 | */ |
230 | void eeh_slot_error_detail(struct eeh_dev *edev, int severity) | 234 | void eeh_slot_error_detail(struct eeh_pe *pe, int severity) |
231 | { | 235 | { |
232 | size_t loglen = 0; | 236 | size_t loglen = 0; |
233 | pci_regs_buf[0] = 0; | 237 | struct eeh_dev *edev; |
234 | 238 | ||
235 | eeh_pci_enable(edev, EEH_OPT_THAW_MMIO); | 239 | eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); |
236 | eeh_ops->configure_bridge(eeh_dev_to_of_node(edev)); | 240 | eeh_ops->configure_bridge(pe); |
237 | eeh_restore_bars(edev); | 241 | eeh_pe_restore_bars(pe); |
238 | loglen = eeh_gather_pci_data(edev, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); | ||
239 | 242 | ||
240 | eeh_ops->get_log(eeh_dev_to_of_node(edev), severity, pci_regs_buf, loglen); | 243 | pci_regs_buf[0] = 0; |
244 | eeh_pe_for_each_dev(pe, edev) { | ||
245 | loglen += eeh_gather_pci_data(edev, pci_regs_buf, | ||
246 | EEH_PCI_REGS_LOG_LEN); | ||
247 | } | ||
248 | |||
249 | eeh_ops->get_log(pe, severity, pci_regs_buf, loglen); | ||
241 | } | 250 | } |
242 | 251 | ||
243 | /** | 252 | /** |
@@ -261,126 +270,8 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) | |||
261 | } | 270 | } |
262 | 271 | ||
263 | /** | 272 | /** |
264 | * eeh_find_device_pe - Retrieve the PE for the given device | 273 | * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze |
265 | * @dn: device node | 274 | * @edev: eeh device |
266 | * | ||
267 | * Return the PE under which this device lies | ||
268 | */ | ||
269 | struct device_node *eeh_find_device_pe(struct device_node *dn) | ||
270 | { | ||
271 | while (dn->parent && of_node_to_eeh_dev(dn->parent) && | ||
272 | (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) { | ||
273 | dn = dn->parent; | ||
274 | } | ||
275 | return dn; | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * __eeh_mark_slot - Mark all child devices as failed | ||
280 | * @parent: parent device | ||
281 | * @mode_flag: failure flag | ||
282 | * | ||
283 | * Mark all devices that are children of this device as failed. | ||
284 | * Mark the device driver too, so that it can see the failure | ||
285 | * immediately; this is critical, since some drivers poll | ||
286 | * status registers in interrupts ... If a driver is polling, | ||
287 | * and the slot is frozen, then the driver can deadlock in | ||
288 | * an interrupt context, which is bad. | ||
289 | */ | ||
290 | static void __eeh_mark_slot(struct device_node *parent, int mode_flag) | ||
291 | { | ||
292 | struct device_node *dn; | ||
293 | |||
294 | for_each_child_of_node(parent, dn) { | ||
295 | if (of_node_to_eeh_dev(dn)) { | ||
296 | /* Mark the pci device driver too */ | ||
297 | struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev; | ||
298 | |||
299 | of_node_to_eeh_dev(dn)->mode |= mode_flag; | ||
300 | |||
301 | if (dev && dev->driver) | ||
302 | dev->error_state = pci_channel_io_frozen; | ||
303 | |||
304 | __eeh_mark_slot(dn, mode_flag); | ||
305 | } | ||
306 | } | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * eeh_mark_slot - Mark the indicated device and its children as failed | ||
311 | * @dn: parent device | ||
312 | * @mode_flag: failure flag | ||
313 | * | ||
314 | * Mark the indicated device and its child devices as failed. | ||
315 | * The device drivers are marked as failed as well. | ||
316 | */ | ||
317 | void eeh_mark_slot(struct device_node *dn, int mode_flag) | ||
318 | { | ||
319 | struct pci_dev *dev; | ||
320 | dn = eeh_find_device_pe(dn); | ||
321 | |||
322 | /* Back up one, since config addrs might be shared */ | ||
323 | if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) | ||
324 | dn = dn->parent; | ||
325 | |||
326 | of_node_to_eeh_dev(dn)->mode |= mode_flag; | ||
327 | |||
328 | /* Mark the pci device too */ | ||
329 | dev = of_node_to_eeh_dev(dn)->pdev; | ||
330 | if (dev) | ||
331 | dev->error_state = pci_channel_io_frozen; | ||
332 | |||
333 | __eeh_mark_slot(dn, mode_flag); | ||
334 | } | ||
335 | |||
336 | /** | ||
337 | * __eeh_clear_slot - Clear failure flag for the child devices | ||
338 | * @parent: parent device | ||
339 | * @mode_flag: flag to be cleared | ||
340 | * | ||
341 | * Clear failure flag for the child devices. | ||
342 | */ | ||
343 | static void __eeh_clear_slot(struct device_node *parent, int mode_flag) | ||
344 | { | ||
345 | struct device_node *dn; | ||
346 | |||
347 | for_each_child_of_node(parent, dn) { | ||
348 | if (of_node_to_eeh_dev(dn)) { | ||
349 | of_node_to_eeh_dev(dn)->mode &= ~mode_flag; | ||
350 | of_node_to_eeh_dev(dn)->check_count = 0; | ||
351 | __eeh_clear_slot(dn, mode_flag); | ||
352 | } | ||
353 | } | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * eeh_clear_slot - Clear failure flag for the indicated device and its children | ||
358 | * @dn: parent device | ||
359 | * @mode_flag: flag to be cleared | ||
360 | * | ||
361 | * Clear failure flag for the indicated device and its children. | ||
362 | */ | ||
363 | void eeh_clear_slot(struct device_node *dn, int mode_flag) | ||
364 | { | ||
365 | unsigned long flags; | ||
366 | raw_spin_lock_irqsave(&confirm_error_lock, flags); | ||
367 | |||
368 | dn = eeh_find_device_pe(dn); | ||
369 | |||
370 | /* Back up one, since config addrs might be shared */ | ||
371 | if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) | ||
372 | dn = dn->parent; | ||
373 | |||
374 | of_node_to_eeh_dev(dn)->mode &= ~mode_flag; | ||
375 | of_node_to_eeh_dev(dn)->check_count = 0; | ||
376 | __eeh_clear_slot(dn, mode_flag); | ||
377 | raw_spin_unlock_irqrestore(&confirm_error_lock, flags); | ||
378 | } | ||
379 | |||
380 | /** | ||
381 | * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze | ||
382 | * @dn: device node | ||
383 | * @dev: pci device, if known | ||
384 | * | 275 | * |
385 | * Check for an EEH failure for the given device node. Call this | 276 | * Check for an EEH failure for the given device node. Call this |
386 | * routine if the result of a read was all 0xff's and you want to | 277 | * routine if the result of a read was all 0xff's and you want to |
@@ -392,11 +283,13 @@ void eeh_clear_slot(struct device_node *dn, int mode_flag) | |||
392 | * | 283 | * |
393 | * It is safe to call this routine in an interrupt context. | 284 | * It is safe to call this routine in an interrupt context. |
394 | */ | 285 | */ |
395 | int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | 286 | int eeh_dev_check_failure(struct eeh_dev *edev) |
396 | { | 287 | { |
397 | int ret; | 288 | int ret; |
398 | unsigned long flags; | 289 | unsigned long flags; |
399 | struct eeh_dev *edev; | 290 | struct device_node *dn; |
291 | struct pci_dev *dev; | ||
292 | struct eeh_pe *pe; | ||
400 | int rc = 0; | 293 | int rc = 0; |
401 | const char *location; | 294 | const char *location; |
402 | 295 | ||
@@ -405,23 +298,23 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |||
405 | if (!eeh_subsystem_enabled) | 298 | if (!eeh_subsystem_enabled) |
406 | return 0; | 299 | return 0; |
407 | 300 | ||
408 | if (!dn) { | 301 | if (!edev) { |
409 | eeh_stats.no_dn++; | 302 | eeh_stats.no_dn++; |
410 | return 0; | 303 | return 0; |
411 | } | 304 | } |
412 | dn = eeh_find_device_pe(dn); | 305 | dn = eeh_dev_to_of_node(edev); |
413 | edev = of_node_to_eeh_dev(dn); | 306 | dev = eeh_dev_to_pci_dev(edev); |
307 | pe = edev->pe; | ||
414 | 308 | ||
415 | /* Access to IO BARs might get this far and still not want checking. */ | 309 | /* Access to IO BARs might get this far and still not want checking. */ |
416 | if (!(edev->mode & EEH_MODE_SUPPORTED) || | 310 | if (!pe) { |
417 | edev->mode & EEH_MODE_NOCHECK) { | ||
418 | eeh_stats.ignored_check++; | 311 | eeh_stats.ignored_check++; |
419 | pr_debug("EEH: Ignored check (%x) for %s %s\n", | 312 | pr_debug("EEH: Ignored check for %s %s\n", |
420 | edev->mode, eeh_pci_name(dev), dn->full_name); | 313 | eeh_pci_name(dev), dn->full_name); |
421 | return 0; | 314 | return 0; |
422 | } | 315 | } |
423 | 316 | ||
424 | if (!edev->config_addr && !edev->pe_config_addr) { | 317 | if (!pe->addr && !pe->config_addr) { |
425 | eeh_stats.no_cfg_addr++; | 318 | eeh_stats.no_cfg_addr++; |
426 | return 0; | 319 | return 0; |
427 | } | 320 | } |
@@ -434,13 +327,13 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |||
434 | */ | 327 | */ |
435 | raw_spin_lock_irqsave(&confirm_error_lock, flags); | 328 | raw_spin_lock_irqsave(&confirm_error_lock, flags); |
436 | rc = 1; | 329 | rc = 1; |
437 | if (edev->mode & EEH_MODE_ISOLATED) { | 330 | if (pe->state & EEH_PE_ISOLATED) { |
438 | edev->check_count++; | 331 | pe->check_count++; |
439 | if (edev->check_count % EEH_MAX_FAILS == 0) { | 332 | if (pe->check_count % EEH_MAX_FAILS == 0) { |
440 | location = of_get_property(dn, "ibm,loc-code", NULL); | 333 | location = of_get_property(dn, "ibm,loc-code", NULL); |
441 | printk(KERN_ERR "EEH: %d reads ignored for recovering device at " | 334 | printk(KERN_ERR "EEH: %d reads ignored for recovering device at " |
442 | "location=%s driver=%s pci addr=%s\n", | 335 | "location=%s driver=%s pci addr=%s\n", |
443 | edev->check_count, location, | 336 | pe->check_count, location, |
444 | eeh_driver_name(dev), eeh_pci_name(dev)); | 337 | eeh_driver_name(dev), eeh_pci_name(dev)); |
445 | printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", | 338 | printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", |
446 | eeh_driver_name(dev)); | 339 | eeh_driver_name(dev)); |
@@ -456,7 +349,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |||
456 | * function zero of a multi-function device. | 349 | * function zero of a multi-function device. |
457 | * In any case they must share a common PHB. | 350 | * In any case they must share a common PHB. |
458 | */ | 351 | */ |
459 | ret = eeh_ops->get_state(dn, NULL); | 352 | ret = eeh_ops->get_state(pe, NULL); |
460 | 353 | ||
461 | /* Note that config-io to empty slots may fail; | 354 | /* Note that config-io to empty slots may fail; |
462 | * they are empty when they don't have children. | 355 | * they are empty when they don't have children. |
@@ -469,7 +362,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |||
469 | (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == | 362 | (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == |
470 | (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) { | 363 | (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) { |
471 | eeh_stats.false_positives++; | 364 | eeh_stats.false_positives++; |
472 | edev->false_positives ++; | 365 | pe->false_positives++; |
473 | rc = 0; | 366 | rc = 0; |
474 | goto dn_unlock; | 367 | goto dn_unlock; |
475 | } | 368 | } |
@@ -480,10 +373,10 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |||
480 | * with other functions on this device, and functions under | 373 | * with other functions on this device, and functions under |
481 | * bridges. | 374 | * bridges. |
482 | */ | 375 | */ |
483 | eeh_mark_slot(dn, EEH_MODE_ISOLATED); | 376 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); |
484 | raw_spin_unlock_irqrestore(&confirm_error_lock, flags); | 377 | raw_spin_unlock_irqrestore(&confirm_error_lock, flags); |
485 | 378 | ||
486 | eeh_send_failure_event(edev); | 379 | eeh_send_failure_event(pe); |
487 | 380 | ||
488 | /* Most EEH events are due to device driver bugs. Having | 381 | /* Most EEH events are due to device driver bugs. Having |
489 | * a stack trace will help the device-driver authors figure | 382 | * a stack trace will help the device-driver authors figure |
@@ -497,7 +390,7 @@ dn_unlock: | |||
497 | return rc; | 390 | return rc; |
498 | } | 391 | } |
499 | 392 | ||
500 | EXPORT_SYMBOL_GPL(eeh_dn_check_failure); | 393 | EXPORT_SYMBOL_GPL(eeh_dev_check_failure); |
501 | 394 | ||
502 | /** | 395 | /** |
503 | * eeh_check_failure - Check if all 1's data is due to EEH slot freeze | 396 | * eeh_check_failure - Check if all 1's data is due to EEH slot freeze |
@@ -514,21 +407,19 @@ EXPORT_SYMBOL_GPL(eeh_dn_check_failure); | |||
514 | unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) | 407 | unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) |
515 | { | 408 | { |
516 | unsigned long addr; | 409 | unsigned long addr; |
517 | struct pci_dev *dev; | 410 | struct eeh_dev *edev; |
518 | struct device_node *dn; | ||
519 | 411 | ||
520 | /* Finding the phys addr + pci device; this is pretty quick. */ | 412 | /* Finding the phys addr + pci device; this is pretty quick. */ |
521 | addr = eeh_token_to_phys((unsigned long __force) token); | 413 | addr = eeh_token_to_phys((unsigned long __force) token); |
522 | dev = pci_addr_cache_get_device(addr); | 414 | edev = eeh_addr_cache_get_dev(addr); |
523 | if (!dev) { | 415 | if (!edev) { |
524 | eeh_stats.no_device++; | 416 | eeh_stats.no_device++; |
525 | return val; | 417 | return val; |
526 | } | 418 | } |
527 | 419 | ||
528 | dn = pci_device_to_OF_node(dev); | 420 | eeh_dev_check_failure(edev); |
529 | eeh_dn_check_failure(dn, dev); | ||
530 | 421 | ||
531 | pci_dev_put(dev); | 422 | pci_dev_put(eeh_dev_to_pci_dev(edev)); |
532 | return val; | 423 | return val; |
533 | } | 424 | } |
534 | 425 | ||
@@ -537,23 +428,22 @@ EXPORT_SYMBOL(eeh_check_failure); | |||
537 | 428 | ||
538 | /** | 429 | /** |
539 | * eeh_pci_enable - Enable MMIO or DMA transfers for this slot | 430 | * eeh_pci_enable - Enable MMIO or DMA transfers for this slot |
540 | * @edev: pci device node | 431 | * @pe: EEH PE |
541 | * | 432 | * |
542 | * This routine should be called to reenable frozen MMIO or DMA | 433 | * This routine should be called to reenable frozen MMIO or DMA |
543 | * so that it would work correctly again. It's useful while doing | 434 | * so that it would work correctly again. It's useful while doing |
544 | * recovery or log collection on the indicated device. | 435 | * recovery or log collection on the indicated device. |
545 | */ | 436 | */ |
546 | int eeh_pci_enable(struct eeh_dev *edev, int function) | 437 | int eeh_pci_enable(struct eeh_pe *pe, int function) |
547 | { | 438 | { |
548 | int rc; | 439 | int rc; |
549 | struct device_node *dn = eeh_dev_to_of_node(edev); | ||
550 | 440 | ||
551 | rc = eeh_ops->set_option(dn, function); | 441 | rc = eeh_ops->set_option(pe, function); |
552 | if (rc) | 442 | if (rc) |
553 | printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n", | 443 | pr_warning("%s: Unexpected state change %d on PHB#%d-PE#%x, err=%d\n", |
554 | function, rc, dn->full_name); | 444 | __func__, function, pe->phb->global_number, pe->addr, rc); |
555 | 445 | ||
556 | rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC); | 446 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); |
557 | if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) && | 447 | if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) && |
558 | (function == EEH_OPT_THAW_MMIO)) | 448 | (function == EEH_OPT_THAW_MMIO)) |
559 | return 0; | 449 | return 0; |
@@ -571,17 +461,24 @@ int eeh_pci_enable(struct eeh_dev *edev, int function) | |||
571 | */ | 461 | */ |
572 | int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) | 462 | int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) |
573 | { | 463 | { |
574 | struct device_node *dn = pci_device_to_OF_node(dev); | 464 | struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); |
465 | struct eeh_pe *pe = edev->pe; | ||
466 | |||
467 | if (!pe) { | ||
468 | pr_err("%s: No PE found on PCI device %s\n", | ||
469 | __func__, pci_name(dev)); | ||
470 | return -EINVAL; | ||
471 | } | ||
575 | 472 | ||
576 | switch (state) { | 473 | switch (state) { |
577 | case pcie_deassert_reset: | 474 | case pcie_deassert_reset: |
578 | eeh_ops->reset(dn, EEH_RESET_DEACTIVATE); | 475 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); |
579 | break; | 476 | break; |
580 | case pcie_hot_reset: | 477 | case pcie_hot_reset: |
581 | eeh_ops->reset(dn, EEH_RESET_HOT); | 478 | eeh_ops->reset(pe, EEH_RESET_HOT); |
582 | break; | 479 | break; |
583 | case pcie_warm_reset: | 480 | case pcie_warm_reset: |
584 | eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL); | 481 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); |
585 | break; | 482 | break; |
586 | default: | 483 | default: |
587 | return -EINVAL; | 484 | return -EINVAL; |
@@ -591,66 +488,37 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat | |||
591 | } | 488 | } |
592 | 489 | ||
593 | /** | 490 | /** |
594 | * __eeh_set_pe_freset - Check the required reset for child devices | 491 | * eeh_set_pe_freset - Check the required reset for the indicated device |
595 | * @parent: parent device | 492 | * @data: EEH device |
596 | * @freset: return value | 493 | * @flag: return value |
597 | * | ||
598 | * Each device might have its preferred reset type: fundamental or | ||
599 | * hot reset. The routine is used to collect the information from | ||
600 | * the child devices so that they could be reset accordingly. | ||
601 | */ | ||
602 | void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset) | ||
603 | { | ||
604 | struct device_node *dn; | ||
605 | |||
606 | for_each_child_of_node(parent, dn) { | ||
607 | if (of_node_to_eeh_dev(dn)) { | ||
608 | struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev; | ||
609 | |||
610 | if (dev && dev->driver) | ||
611 | *freset |= dev->needs_freset; | ||
612 | |||
613 | __eeh_set_pe_freset(dn, freset); | ||
614 | } | ||
615 | } | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * eeh_set_pe_freset - Check the required reset for the indicated device and its children | ||
620 | * @dn: parent device | ||
621 | * @freset: return value | ||
622 | * | 494 | * |
623 | * Each device might have its preferred reset type: fundamental or | 495 | * Each device might have its preferred reset type: fundamental or |
624 | * hot reset. The routine is used to collected the information for | 496 | * hot reset. The routine is used to collected the information for |
625 | * the indicated device and its children so that the bunch of the | 497 | * the indicated device and its children so that the bunch of the |
626 | * devices could be reset properly. | 498 | * devices could be reset properly. |
627 | */ | 499 | */ |
628 | void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset) | 500 | static void *eeh_set_dev_freset(void *data, void *flag) |
629 | { | 501 | { |
630 | struct pci_dev *dev; | 502 | struct pci_dev *dev; |
631 | dn = eeh_find_device_pe(dn); | 503 | unsigned int *freset = (unsigned int *)flag; |
632 | 504 | struct eeh_dev *edev = (struct eeh_dev *)data; | |
633 | /* Back up one, since config addrs might be shared */ | ||
634 | if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) | ||
635 | dn = dn->parent; | ||
636 | 505 | ||
637 | dev = of_node_to_eeh_dev(dn)->pdev; | 506 | dev = eeh_dev_to_pci_dev(edev); |
638 | if (dev) | 507 | if (dev) |
639 | *freset |= dev->needs_freset; | 508 | *freset |= dev->needs_freset; |
640 | 509 | ||
641 | __eeh_set_pe_freset(dn, freset); | 510 | return NULL; |
642 | } | 511 | } |
643 | 512 | ||
644 | /** | 513 | /** |
645 | * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second | 514 | * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second |
646 | * @edev: pci device node to be reset. | 515 | * @pe: EEH PE |
647 | * | 516 | * |
648 | * Assert the PCI #RST line for 1/4 second. | 517 | * Assert the PCI #RST line for 1/4 second. |
649 | */ | 518 | */ |
650 | static void eeh_reset_pe_once(struct eeh_dev *edev) | 519 | static void eeh_reset_pe_once(struct eeh_pe *pe) |
651 | { | 520 | { |
652 | unsigned int freset = 0; | 521 | unsigned int freset = 0; |
653 | struct device_node *dn = eeh_dev_to_of_node(edev); | ||
654 | 522 | ||
655 | /* Determine type of EEH reset required for | 523 | /* Determine type of EEH reset required for |
656 | * Partitionable Endpoint, a hot-reset (1) | 524 | * Partitionable Endpoint, a hot-reset (1) |
@@ -658,12 +526,12 @@ static void eeh_reset_pe_once(struct eeh_dev *edev) | |||
658 | * A fundamental reset required by any device under | 526 | * A fundamental reset required by any device under |
659 | * Partitionable Endpoint trumps hot-reset. | 527 | * Partitionable Endpoint trumps hot-reset. |
660 | */ | 528 | */ |
661 | eeh_set_pe_freset(dn, &freset); | 529 | eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset); |
662 | 530 | ||
663 | if (freset) | 531 | if (freset) |
664 | eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL); | 532 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); |
665 | else | 533 | else |
666 | eeh_ops->reset(dn, EEH_RESET_HOT); | 534 | eeh_ops->reset(pe, EEH_RESET_HOT); |
667 | 535 | ||
668 | /* The PCI bus requires that the reset be held high for at least | 536 | /* The PCI bus requires that the reset be held high for at least |
669 | * a 100 milliseconds. We wait a bit longer 'just in case'. | 537 | * a 100 milliseconds. We wait a bit longer 'just in case'. |
@@ -675,9 +543,9 @@ static void eeh_reset_pe_once(struct eeh_dev *edev) | |||
675 | * pci slot reset line is dropped. Make sure we don't miss | 543 | * pci slot reset line is dropped. Make sure we don't miss |
676 | * these, and clear the flag now. | 544 | * these, and clear the flag now. |
677 | */ | 545 | */ |
678 | eeh_clear_slot(dn, EEH_MODE_ISOLATED); | 546 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); |
679 | 547 | ||
680 | eeh_ops->reset(dn, EEH_RESET_DEACTIVATE); | 548 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); |
681 | 549 | ||
682 | /* After a PCI slot has been reset, the PCI Express spec requires | 550 | /* After a PCI slot has been reset, the PCI Express spec requires |
683 | * a 1.5 second idle time for the bus to stabilize, before starting | 551 | * a 1.5 second idle time for the bus to stabilize, before starting |
@@ -689,116 +557,36 @@ static void eeh_reset_pe_once(struct eeh_dev *edev) | |||
689 | 557 | ||
690 | /** | 558 | /** |
691 | * eeh_reset_pe - Reset the indicated PE | 559 | * eeh_reset_pe - Reset the indicated PE |
692 | * @edev: PCI device associated EEH device | 560 | * @pe: EEH PE |
693 | * | 561 | * |
694 | * This routine should be called to reset indicated device, including | 562 | * This routine should be called to reset indicated device, including |
695 | * PE. A PE might include multiple PCI devices and sometimes PCI bridges | 563 | * PE. A PE might include multiple PCI devices and sometimes PCI bridges |
696 | * might be involved as well. | 564 | * might be involved as well. |
697 | */ | 565 | */ |
698 | int eeh_reset_pe(struct eeh_dev *edev) | 566 | int eeh_reset_pe(struct eeh_pe *pe) |
699 | { | 567 | { |
700 | int i, rc; | 568 | int i, rc; |
701 | struct device_node *dn = eeh_dev_to_of_node(edev); | ||
702 | 569 | ||
703 | /* Take three shots at resetting the bus */ | 570 | /* Take three shots at resetting the bus */ |
704 | for (i=0; i<3; i++) { | 571 | for (i=0; i<3; i++) { |
705 | eeh_reset_pe_once(edev); | 572 | eeh_reset_pe_once(pe); |
706 | 573 | ||
707 | rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC); | 574 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); |
708 | if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) | 575 | if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) |
709 | return 0; | 576 | return 0; |
710 | 577 | ||
711 | if (rc < 0) { | 578 | if (rc < 0) { |
712 | printk(KERN_ERR "EEH: unrecoverable slot failure %s\n", | 579 | pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x", |
713 | dn->full_name); | 580 | __func__, pe->phb->global_number, pe->addr); |
714 | return -1; | 581 | return -1; |
715 | } | 582 | } |
716 | printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n", | 583 | pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n", |
717 | i+1, dn->full_name, rc); | 584 | i+1, pe->phb->global_number, pe->addr, rc); |
718 | } | 585 | } |
719 | 586 | ||
720 | return -1; | 587 | return -1; |
721 | } | 588 | } |
722 | 589 | ||
723 | /** Save and restore of PCI BARs | ||
724 | * | ||
725 | * Although firmware will set up BARs during boot, it doesn't | ||
726 | * set up device BAR's after a device reset, although it will, | ||
727 | * if requested, set up bridge configuration. Thus, we need to | ||
728 | * configure the PCI devices ourselves. | ||
729 | */ | ||
730 | |||
731 | /** | ||
732 | * eeh_restore_one_device_bars - Restore the Base Address Registers for one device | ||
733 | * @edev: PCI device associated EEH device | ||
734 | * | ||
735 | * Loads the PCI configuration space base address registers, | ||
736 | * the expansion ROM base address, the latency timer, and etc. | ||
737 | * from the saved values in the device node. | ||
738 | */ | ||
739 | static inline void eeh_restore_one_device_bars(struct eeh_dev *edev) | ||
740 | { | ||
741 | int i; | ||
742 | u32 cmd; | ||
743 | struct device_node *dn = eeh_dev_to_of_node(edev); | ||
744 | |||
745 | if (!edev->phb) | ||
746 | return; | ||
747 | |||
748 | for (i=4; i<10; i++) { | ||
749 | eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); | ||
750 | } | ||
751 | |||
752 | /* 12 == Expansion ROM Address */ | ||
753 | eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]); | ||
754 | |||
755 | #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) | ||
756 | #define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) | ||
757 | |||
758 | eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, | ||
759 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); | ||
760 | |||
761 | eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, | ||
762 | SAVED_BYTE(PCI_LATENCY_TIMER)); | ||
763 | |||
764 | /* max latency, min grant, interrupt pin and line */ | ||
765 | eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); | ||
766 | |||
767 | /* Restore PERR & SERR bits, some devices require it, | ||
768 | * don't touch the other command bits | ||
769 | */ | ||
770 | eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd); | ||
771 | if (edev->config_space[1] & PCI_COMMAND_PARITY) | ||
772 | cmd |= PCI_COMMAND_PARITY; | ||
773 | else | ||
774 | cmd &= ~PCI_COMMAND_PARITY; | ||
775 | if (edev->config_space[1] & PCI_COMMAND_SERR) | ||
776 | cmd |= PCI_COMMAND_SERR; | ||
777 | else | ||
778 | cmd &= ~PCI_COMMAND_SERR; | ||
779 | eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd); | ||
780 | } | ||
781 | |||
782 | /** | ||
783 | * eeh_restore_bars - Restore the PCI config space info | ||
784 | * @edev: EEH device | ||
785 | * | ||
786 | * This routine performs a recursive walk to the children | ||
787 | * of this device as well. | ||
788 | */ | ||
789 | void eeh_restore_bars(struct eeh_dev *edev) | ||
790 | { | ||
791 | struct device_node *dn; | ||
792 | if (!edev) | ||
793 | return; | ||
794 | |||
795 | if ((edev->mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(edev->class_code)) | ||
796 | eeh_restore_one_device_bars(edev); | ||
797 | |||
798 | for_each_child_of_node(eeh_dev_to_of_node(edev), dn) | ||
799 | eeh_restore_bars(of_node_to_eeh_dev(dn)); | ||
800 | } | ||
801 | |||
802 | /** | 590 | /** |
803 | * eeh_save_bars - Save device bars | 591 | * eeh_save_bars - Save device bars |
804 | * @edev: PCI device associated EEH device | 592 | * @edev: PCI device associated EEH device |
@@ -808,7 +596,7 @@ void eeh_restore_bars(struct eeh_dev *edev) | |||
808 | * PCI devices are added individually; but, for the restore, | 596 | * PCI devices are added individually; but, for the restore, |
809 | * an entire slot is reset at a time. | 597 | * an entire slot is reset at a time. |
810 | */ | 598 | */ |
811 | static void eeh_save_bars(struct eeh_dev *edev) | 599 | void eeh_save_bars(struct eeh_dev *edev) |
812 | { | 600 | { |
813 | int i; | 601 | int i; |
814 | struct device_node *dn; | 602 | struct device_node *dn; |
@@ -822,102 +610,6 @@ static void eeh_save_bars(struct eeh_dev *edev) | |||
822 | } | 610 | } |
823 | 611 | ||
824 | /** | 612 | /** |
825 | * eeh_early_enable - Early enable EEH on the indicated device | ||
826 | * @dn: device node | ||
827 | * @data: BUID | ||
828 | * | ||
829 | * Enable EEH functionality on the specified PCI device. The function | ||
830 | * is expected to be called before real PCI probing is done. However, | ||
831 | * the PHBs have been initialized at this point. | ||
832 | */ | ||
833 | static void *eeh_early_enable(struct device_node *dn, void *data) | ||
834 | { | ||
835 | int ret; | ||
836 | const u32 *class_code = of_get_property(dn, "class-code", NULL); | ||
837 | const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL); | ||
838 | const u32 *device_id = of_get_property(dn, "device-id", NULL); | ||
839 | const u32 *regs; | ||
840 | int enable; | ||
841 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | ||
842 | |||
843 | edev->class_code = 0; | ||
844 | edev->mode = 0; | ||
845 | edev->check_count = 0; | ||
846 | edev->freeze_count = 0; | ||
847 | edev->false_positives = 0; | ||
848 | |||
849 | if (!of_device_is_available(dn)) | ||
850 | return NULL; | ||
851 | |||
852 | /* Ignore bad nodes. */ | ||
853 | if (!class_code || !vendor_id || !device_id) | ||
854 | return NULL; | ||
855 | |||
856 | /* There is nothing to check on PCI to ISA bridges */ | ||
857 | if (dn->type && !strcmp(dn->type, "isa")) { | ||
858 | edev->mode |= EEH_MODE_NOCHECK; | ||
859 | return NULL; | ||
860 | } | ||
861 | edev->class_code = *class_code; | ||
862 | |||
863 | /* Ok... see if this device supports EEH. Some do, some don't, | ||
864 | * and the only way to find out is to check each and every one. | ||
865 | */ | ||
866 | regs = of_get_property(dn, "reg", NULL); | ||
867 | if (regs) { | ||
868 | /* First register entry is addr (00BBSS00) */ | ||
869 | /* Try to enable eeh */ | ||
870 | ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE); | ||
871 | |||
872 | enable = 0; | ||
873 | if (ret == 0) { | ||
874 | edev->config_addr = regs[0]; | ||
875 | |||
876 | /* If the newer, better, ibm,get-config-addr-info is supported, | ||
877 | * then use that instead. | ||
878 | */ | ||
879 | edev->pe_config_addr = eeh_ops->get_pe_addr(dn); | ||
880 | |||
881 | /* Some older systems (Power4) allow the | ||
882 | * ibm,set-eeh-option call to succeed even on nodes | ||
883 | * where EEH is not supported. Verify support | ||
884 | * explicitly. | ||
885 | */ | ||
886 | ret = eeh_ops->get_state(dn, NULL); | ||
887 | if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT) | ||
888 | enable = 1; | ||
889 | } | ||
890 | |||
891 | if (enable) { | ||
892 | eeh_subsystem_enabled = 1; | ||
893 | edev->mode |= EEH_MODE_SUPPORTED; | ||
894 | |||
895 | pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n", | ||
896 | dn->full_name, edev->config_addr, | ||
897 | edev->pe_config_addr); | ||
898 | } else { | ||
899 | |||
900 | /* This device doesn't support EEH, but it may have an | ||
901 | * EEH parent, in which case we mark it as supported. | ||
902 | */ | ||
903 | if (dn->parent && of_node_to_eeh_dev(dn->parent) && | ||
904 | (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) { | ||
905 | /* Parent supports EEH. */ | ||
906 | edev->mode |= EEH_MODE_SUPPORTED; | ||
907 | edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr; | ||
908 | return NULL; | ||
909 | } | ||
910 | } | ||
911 | } else { | ||
912 | printk(KERN_WARNING "EEH: %s: unable to get reg property.\n", | ||
913 | dn->full_name); | ||
914 | } | ||
915 | |||
916 | eeh_save_bars(edev); | ||
917 | return NULL; | ||
918 | } | ||
919 | |||
920 | /** | ||
921 | * eeh_ops_register - Register platform dependent EEH operations | 613 | * eeh_ops_register - Register platform dependent EEH operations |
922 | * @ops: platform dependent EEH operations | 614 | * @ops: platform dependent EEH operations |
923 | * | 615 | * |
@@ -982,7 +674,7 @@ int __exit eeh_ops_unregister(const char *name) | |||
982 | * Even if force-off is set, the EEH hardware is still enabled, so that | 674 | * Even if force-off is set, the EEH hardware is still enabled, so that |
983 | * newer systems can boot. | 675 | * newer systems can boot. |
984 | */ | 676 | */ |
985 | void __init eeh_init(void) | 677 | static int __init eeh_init(void) |
986 | { | 678 | { |
987 | struct pci_controller *hose, *tmp; | 679 | struct pci_controller *hose, *tmp; |
988 | struct device_node *phb; | 680 | struct device_node *phb; |
@@ -992,27 +684,34 @@ void __init eeh_init(void) | |||
992 | if (!eeh_ops) { | 684 | if (!eeh_ops) { |
993 | pr_warning("%s: Platform EEH operation not found\n", | 685 | pr_warning("%s: Platform EEH operation not found\n", |
994 | __func__); | 686 | __func__); |
995 | return; | 687 | return -EEXIST; |
996 | } else if ((ret = eeh_ops->init())) { | 688 | } else if ((ret = eeh_ops->init())) { |
997 | pr_warning("%s: Failed to call platform init function (%d)\n", | 689 | pr_warning("%s: Failed to call platform init function (%d)\n", |
998 | __func__, ret); | 690 | __func__, ret); |
999 | return; | 691 | return ret; |
1000 | } | 692 | } |
1001 | 693 | ||
1002 | raw_spin_lock_init(&confirm_error_lock); | 694 | raw_spin_lock_init(&confirm_error_lock); |
1003 | 695 | ||
1004 | /* Enable EEH for all adapters */ | 696 | /* Enable EEH for all adapters */ |
1005 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | 697 | if (eeh_probe_mode_devtree()) { |
1006 | phb = hose->dn; | 698 | list_for_each_entry_safe(hose, tmp, |
1007 | traverse_pci_devices(phb, eeh_early_enable, NULL); | 699 | &hose_list, list_node) { |
700 | phb = hose->dn; | ||
701 | traverse_pci_devices(phb, eeh_ops->of_probe, NULL); | ||
702 | } | ||
1008 | } | 703 | } |
1009 | 704 | ||
1010 | if (eeh_subsystem_enabled) | 705 | if (eeh_subsystem_enabled) |
1011 | printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n"); | 706 | pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); |
1012 | else | 707 | else |
1013 | printk(KERN_WARNING "EEH: No capable adapters found\n"); | 708 | pr_warning("EEH: No capable adapters found\n"); |
709 | |||
710 | return ret; | ||
1014 | } | 711 | } |
1015 | 712 | ||
713 | core_initcall_sync(eeh_init); | ||
714 | |||
1016 | /** | 715 | /** |
1017 | * eeh_add_device_early - Enable EEH for the indicated device_node | 716 | * eeh_add_device_early - Enable EEH for the indicated device_node |
1018 | * @dn: device node for which to set up EEH | 717 | * @dn: device node for which to set up EEH |
@@ -1037,7 +736,8 @@ static void eeh_add_device_early(struct device_node *dn) | |||
1037 | if (NULL == phb || 0 == phb->buid) | 736 | if (NULL == phb || 0 == phb->buid) |
1038 | return; | 737 | return; |
1039 | 738 | ||
1040 | eeh_early_enable(dn, NULL); | 739 | /* FIXME: hotplug support on POWERNV */ |
740 | eeh_ops->of_probe(dn, NULL); | ||
1041 | } | 741 | } |
1042 | 742 | ||
1043 | /** | 743 | /** |
@@ -1087,7 +787,7 @@ static void eeh_add_device_late(struct pci_dev *dev) | |||
1087 | edev->pdev = dev; | 787 | edev->pdev = dev; |
1088 | dev->dev.archdata.edev = edev; | 788 | dev->dev.archdata.edev = edev; |
1089 | 789 | ||
1090 | pci_addr_cache_insert_device(dev); | 790 | eeh_addr_cache_insert_dev(dev); |
1091 | eeh_sysfs_add_device(dev); | 791 | eeh_sysfs_add_device(dev); |
1092 | } | 792 | } |
1093 | 793 | ||
@@ -1143,7 +843,8 @@ static void eeh_remove_device(struct pci_dev *dev) | |||
1143 | dev->dev.archdata.edev = NULL; | 843 | dev->dev.archdata.edev = NULL; |
1144 | pci_dev_put(dev); | 844 | pci_dev_put(dev); |
1145 | 845 | ||
1146 | pci_addr_cache_remove_device(dev); | 846 | eeh_rmv_from_parent_pe(edev); |
847 | eeh_addr_cache_rmv_dev(dev); | ||
1147 | eeh_sysfs_remove_device(dev); | 848 | eeh_sysfs_remove_device(dev); |
1148 | } | 849 | } |
1149 | 850 | ||
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c index e5ae1c687c66..5a4c87903057 100644 --- a/arch/powerpc/platforms/pseries/eeh_cache.c +++ b/arch/powerpc/platforms/pseries/eeh_cache.c | |||
@@ -50,6 +50,7 @@ struct pci_io_addr_range { | |||
50 | struct rb_node rb_node; | 50 | struct rb_node rb_node; |
51 | unsigned long addr_lo; | 51 | unsigned long addr_lo; |
52 | unsigned long addr_hi; | 52 | unsigned long addr_hi; |
53 | struct eeh_dev *edev; | ||
53 | struct pci_dev *pcidev; | 54 | struct pci_dev *pcidev; |
54 | unsigned int flags; | 55 | unsigned int flags; |
55 | }; | 56 | }; |
@@ -59,7 +60,7 @@ static struct pci_io_addr_cache { | |||
59 | spinlock_t piar_lock; | 60 | spinlock_t piar_lock; |
60 | } pci_io_addr_cache_root; | 61 | } pci_io_addr_cache_root; |
61 | 62 | ||
62 | static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr) | 63 | static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr) |
63 | { | 64 | { |
64 | struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node; | 65 | struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node; |
65 | 66 | ||
@@ -74,7 +75,7 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr) | |||
74 | n = n->rb_right; | 75 | n = n->rb_right; |
75 | } else { | 76 | } else { |
76 | pci_dev_get(piar->pcidev); | 77 | pci_dev_get(piar->pcidev); |
77 | return piar->pcidev; | 78 | return piar->edev; |
78 | } | 79 | } |
79 | } | 80 | } |
80 | } | 81 | } |
@@ -83,7 +84,7 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr) | |||
83 | } | 84 | } |
84 | 85 | ||
85 | /** | 86 | /** |
86 | * pci_addr_cache_get_device - Get device, given only address | 87 | * eeh_addr_cache_get_dev - Get device, given only address |
87 | * @addr: mmio (PIO) phys address or i/o port number | 88 | * @addr: mmio (PIO) phys address or i/o port number |
88 | * | 89 | * |
89 | * Given an mmio phys address, or a port number, find a pci device | 90 | * Given an mmio phys address, or a port number, find a pci device |
@@ -92,15 +93,15 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr) | |||
92 | * from zero (that is, they do *not* have pci_io_addr added in). | 93 | * from zero (that is, they do *not* have pci_io_addr added in). |
93 | * It is safe to call this function within an interrupt. | 94 | * It is safe to call this function within an interrupt. |
94 | */ | 95 | */ |
95 | struct pci_dev *pci_addr_cache_get_device(unsigned long addr) | 96 | struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr) |
96 | { | 97 | { |
97 | struct pci_dev *dev; | 98 | struct eeh_dev *edev; |
98 | unsigned long flags; | 99 | unsigned long flags; |
99 | 100 | ||
100 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); | 101 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); |
101 | dev = __pci_addr_cache_get_device(addr); | 102 | edev = __eeh_addr_cache_get_device(addr); |
102 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); | 103 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); |
103 | return dev; | 104 | return edev; |
104 | } | 105 | } |
105 | 106 | ||
106 | #ifdef DEBUG | 107 | #ifdef DEBUG |
@@ -108,7 +109,7 @@ struct pci_dev *pci_addr_cache_get_device(unsigned long addr) | |||
108 | * Handy-dandy debug print routine, does nothing more | 109 | * Handy-dandy debug print routine, does nothing more |
109 | * than print out the contents of our addr cache. | 110 | * than print out the contents of our addr cache. |
110 | */ | 111 | */ |
111 | static void pci_addr_cache_print(struct pci_io_addr_cache *cache) | 112 | static void eeh_addr_cache_print(struct pci_io_addr_cache *cache) |
112 | { | 113 | { |
113 | struct rb_node *n; | 114 | struct rb_node *n; |
114 | int cnt = 0; | 115 | int cnt = 0; |
@@ -117,7 +118,7 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache) | |||
117 | while (n) { | 118 | while (n) { |
118 | struct pci_io_addr_range *piar; | 119 | struct pci_io_addr_range *piar; |
119 | piar = rb_entry(n, struct pci_io_addr_range, rb_node); | 120 | piar = rb_entry(n, struct pci_io_addr_range, rb_node); |
120 | printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n", | 121 | pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n", |
121 | (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt, | 122 | (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt, |
122 | piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev)); | 123 | piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev)); |
123 | cnt++; | 124 | cnt++; |
@@ -128,7 +129,7 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache) | |||
128 | 129 | ||
129 | /* Insert address range into the rb tree. */ | 130 | /* Insert address range into the rb tree. */ |
130 | static struct pci_io_addr_range * | 131 | static struct pci_io_addr_range * |
131 | pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo, | 132 | eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo, |
132 | unsigned long ahi, unsigned int flags) | 133 | unsigned long ahi, unsigned int flags) |
133 | { | 134 | { |
134 | struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node; | 135 | struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node; |
@@ -146,23 +147,24 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo, | |||
146 | } else { | 147 | } else { |
147 | if (dev != piar->pcidev || | 148 | if (dev != piar->pcidev || |
148 | alo != piar->addr_lo || ahi != piar->addr_hi) { | 149 | alo != piar->addr_lo || ahi != piar->addr_hi) { |
149 | printk(KERN_WARNING "PIAR: overlapping address range\n"); | 150 | pr_warning("PIAR: overlapping address range\n"); |
150 | } | 151 | } |
151 | return piar; | 152 | return piar; |
152 | } | 153 | } |
153 | } | 154 | } |
154 | piar = kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); | 155 | piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); |
155 | if (!piar) | 156 | if (!piar) |
156 | return NULL; | 157 | return NULL; |
157 | 158 | ||
158 | pci_dev_get(dev); | 159 | pci_dev_get(dev); |
159 | piar->addr_lo = alo; | 160 | piar->addr_lo = alo; |
160 | piar->addr_hi = ahi; | 161 | piar->addr_hi = ahi; |
162 | piar->edev = pci_dev_to_eeh_dev(dev); | ||
161 | piar->pcidev = dev; | 163 | piar->pcidev = dev; |
162 | piar->flags = flags; | 164 | piar->flags = flags; |
163 | 165 | ||
164 | #ifdef DEBUG | 166 | #ifdef DEBUG |
165 | printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n", | 167 | pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n", |
166 | alo, ahi, pci_name(dev)); | 168 | alo, ahi, pci_name(dev)); |
167 | #endif | 169 | #endif |
168 | 170 | ||
@@ -172,7 +174,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo, | |||
172 | return piar; | 174 | return piar; |
173 | } | 175 | } |
174 | 176 | ||
175 | static void __pci_addr_cache_insert_device(struct pci_dev *dev) | 177 | static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) |
176 | { | 178 | { |
177 | struct device_node *dn; | 179 | struct device_node *dn; |
178 | struct eeh_dev *edev; | 180 | struct eeh_dev *edev; |
@@ -180,7 +182,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev) | |||
180 | 182 | ||
181 | dn = pci_device_to_OF_node(dev); | 183 | dn = pci_device_to_OF_node(dev); |
182 | if (!dn) { | 184 | if (!dn) { |
183 | printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev)); | 185 | pr_warning("PCI: no pci dn found for dev=%s\n", pci_name(dev)); |
184 | return; | 186 | return; |
185 | } | 187 | } |
186 | 188 | ||
@@ -192,8 +194,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev) | |||
192 | } | 194 | } |
193 | 195 | ||
194 | /* Skip any devices for which EEH is not enabled. */ | 196 | /* Skip any devices for which EEH is not enabled. */ |
195 | if (!(edev->mode & EEH_MODE_SUPPORTED) || | 197 | if (!edev->pe) { |
196 | edev->mode & EEH_MODE_NOCHECK) { | ||
197 | #ifdef DEBUG | 198 | #ifdef DEBUG |
198 | pr_info("PCI: skip building address cache for=%s - %s\n", | 199 | pr_info("PCI: skip building address cache for=%s - %s\n", |
199 | pci_name(dev), dn->full_name); | 200 | pci_name(dev), dn->full_name); |
@@ -212,19 +213,19 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev) | |||
212 | continue; | 213 | continue; |
213 | if (start == 0 || ~start == 0 || end == 0 || ~end == 0) | 214 | if (start == 0 || ~start == 0 || end == 0 || ~end == 0) |
214 | continue; | 215 | continue; |
215 | pci_addr_cache_insert(dev, start, end, flags); | 216 | eeh_addr_cache_insert(dev, start, end, flags); |
216 | } | 217 | } |
217 | } | 218 | } |
218 | 219 | ||
219 | /** | 220 | /** |
220 | * pci_addr_cache_insert_device - Add a device to the address cache | 221 | * eeh_addr_cache_insert_dev - Add a device to the address cache |
221 | * @dev: PCI device whose I/O addresses we are interested in. | 222 | * @dev: PCI device whose I/O addresses we are interested in. |
222 | * | 223 | * |
223 | * In order to support the fast lookup of devices based on addresses, | 224 | * In order to support the fast lookup of devices based on addresses, |
224 | * we maintain a cache of devices that can be quickly searched. | 225 | * we maintain a cache of devices that can be quickly searched. |
225 | * This routine adds a device to that cache. | 226 | * This routine adds a device to that cache. |
226 | */ | 227 | */ |
227 | void pci_addr_cache_insert_device(struct pci_dev *dev) | 228 | void eeh_addr_cache_insert_dev(struct pci_dev *dev) |
228 | { | 229 | { |
229 | unsigned long flags; | 230 | unsigned long flags; |
230 | 231 | ||
@@ -233,11 +234,11 @@ void pci_addr_cache_insert_device(struct pci_dev *dev) | |||
233 | return; | 234 | return; |
234 | 235 | ||
235 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); | 236 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); |
236 | __pci_addr_cache_insert_device(dev); | 237 | __eeh_addr_cache_insert_dev(dev); |
237 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); | 238 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); |
238 | } | 239 | } |
239 | 240 | ||
240 | static inline void __pci_addr_cache_remove_device(struct pci_dev *dev) | 241 | static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev) |
241 | { | 242 | { |
242 | struct rb_node *n; | 243 | struct rb_node *n; |
243 | 244 | ||
@@ -258,7 +259,7 @@ restart: | |||
258 | } | 259 | } |
259 | 260 | ||
260 | /** | 261 | /** |
261 | * pci_addr_cache_remove_device - remove pci device from addr cache | 262 | * eeh_addr_cache_rmv_dev - remove pci device from addr cache |
262 | * @dev: device to remove | 263 | * @dev: device to remove |
263 | * | 264 | * |
264 | * Remove a device from the addr-cache tree. | 265 | * Remove a device from the addr-cache tree. |
@@ -266,17 +267,17 @@ restart: | |||
266 | * the tree multiple times (once per resource). | 267 | * the tree multiple times (once per resource). |
267 | * But so what; device removal doesn't need to be that fast. | 268 | * But so what; device removal doesn't need to be that fast. |
268 | */ | 269 | */ |
269 | void pci_addr_cache_remove_device(struct pci_dev *dev) | 270 | void eeh_addr_cache_rmv_dev(struct pci_dev *dev) |
270 | { | 271 | { |
271 | unsigned long flags; | 272 | unsigned long flags; |
272 | 273 | ||
273 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); | 274 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); |
274 | __pci_addr_cache_remove_device(dev); | 275 | __eeh_addr_cache_rmv_dev(dev); |
275 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); | 276 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); |
276 | } | 277 | } |
277 | 278 | ||
278 | /** | 279 | /** |
279 | * pci_addr_cache_build - Build a cache of I/O addresses | 280 | * eeh_addr_cache_build - Build a cache of I/O addresses |
280 | * | 281 | * |
281 | * Build a cache of pci i/o addresses. This cache will be used to | 282 | * Build a cache of pci i/o addresses. This cache will be used to |
282 | * find the pci device that corresponds to a given address. | 283 | * find the pci device that corresponds to a given address. |
@@ -284,7 +285,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev) | |||
284 | * Must be run late in boot process, after the pci controllers | 285 | * Must be run late in boot process, after the pci controllers |
285 | * have been scanned for devices (after all device resources are known). | 286 | * have been scanned for devices (after all device resources are known). |
286 | */ | 287 | */ |
287 | void __init pci_addr_cache_build(void) | 288 | void __init eeh_addr_cache_build(void) |
288 | { | 289 | { |
289 | struct device_node *dn; | 290 | struct device_node *dn; |
290 | struct eeh_dev *edev; | 291 | struct eeh_dev *edev; |
@@ -293,7 +294,7 @@ void __init pci_addr_cache_build(void) | |||
293 | spin_lock_init(&pci_io_addr_cache_root.piar_lock); | 294 | spin_lock_init(&pci_io_addr_cache_root.piar_lock); |
294 | 295 | ||
295 | for_each_pci_dev(dev) { | 296 | for_each_pci_dev(dev) { |
296 | pci_addr_cache_insert_device(dev); | 297 | eeh_addr_cache_insert_dev(dev); |
297 | 298 | ||
298 | dn = pci_device_to_OF_node(dev); | 299 | dn = pci_device_to_OF_node(dev); |
299 | if (!dn) | 300 | if (!dn) |
@@ -312,7 +313,7 @@ void __init pci_addr_cache_build(void) | |||
312 | 313 | ||
313 | #ifdef DEBUG | 314 | #ifdef DEBUG |
314 | /* Verify tree built up above, echo back the list of addrs. */ | 315 | /* Verify tree built up above, echo back the list of addrs. */ |
315 | pci_addr_cache_print(&pci_io_addr_cache_root); | 316 | eeh_addr_cache_print(&pci_io_addr_cache_root); |
316 | #endif | 317 | #endif |
317 | } | 318 | } |
318 | 319 | ||
diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c index c4507d095900..66442341d3a6 100644 --- a/arch/powerpc/platforms/pseries/eeh_dev.c +++ b/arch/powerpc/platforms/pseries/eeh_dev.c | |||
@@ -55,7 +55,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data) | |||
55 | struct eeh_dev *edev; | 55 | struct eeh_dev *edev; |
56 | 56 | ||
57 | /* Allocate EEH device */ | 57 | /* Allocate EEH device */ |
58 | edev = zalloc_maybe_bootmem(sizeof(*edev), GFP_KERNEL); | 58 | edev = kzalloc(sizeof(*edev), GFP_KERNEL); |
59 | if (!edev) { | 59 | if (!edev) { |
60 | pr_warning("%s: out of memory\n", __func__); | 60 | pr_warning("%s: out of memory\n", __func__); |
61 | return NULL; | 61 | return NULL; |
@@ -65,6 +65,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data) | |||
65 | PCI_DN(dn)->edev = edev; | 65 | PCI_DN(dn)->edev = edev; |
66 | edev->dn = dn; | 66 | edev->dn = dn; |
67 | edev->phb = phb; | 67 | edev->phb = phb; |
68 | INIT_LIST_HEAD(&edev->list); | ||
68 | 69 | ||
69 | return NULL; | 70 | return NULL; |
70 | } | 71 | } |
@@ -80,6 +81,9 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb) | |||
80 | { | 81 | { |
81 | struct device_node *dn = phb->dn; | 82 | struct device_node *dn = phb->dn; |
82 | 83 | ||
84 | /* EEH PE for PHB */ | ||
85 | eeh_phb_pe_create(phb); | ||
86 | |||
83 | /* EEH device for PHB */ | 87 | /* EEH device for PHB */ |
84 | eeh_dev_init(dn, phb); | 88 | eeh_dev_init(dn, phb); |
85 | 89 | ||
@@ -93,10 +97,16 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb) | |||
93 | * Scan all the existing PHBs and create EEH devices for their OF | 97 | * Scan all the existing PHBs and create EEH devices for their OF |
94 | * nodes and their children OF nodes | 98 | * nodes and their children OF nodes |
95 | */ | 99 | */ |
96 | void __init eeh_dev_phb_init(void) | 100 | static int __init eeh_dev_phb_init(void) |
97 | { | 101 | { |
98 | struct pci_controller *phb, *tmp; | 102 | struct pci_controller *phb, *tmp; |
99 | 103 | ||
100 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) | 104 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) |
101 | eeh_dev_phb_init_dynamic(phb); | 105 | eeh_dev_phb_init_dynamic(phb); |
106 | |||
107 | pr_info("EEH: devices created\n"); | ||
108 | |||
109 | return 0; | ||
102 | } | 110 | } |
111 | |||
112 | core_initcall(eeh_dev_phb_init); | ||
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c index baf92cd9dfab..8370ce7d5931 100644 --- a/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/arch/powerpc/platforms/pseries/eeh_driver.c | |||
@@ -93,7 +93,7 @@ static void eeh_disable_irq(struct pci_dev *dev) | |||
93 | if (!irq_has_action(dev->irq)) | 93 | if (!irq_has_action(dev->irq)) |
94 | return; | 94 | return; |
95 | 95 | ||
96 | edev->mode |= EEH_MODE_IRQ_DISABLED; | 96 | edev->mode |= EEH_DEV_IRQ_DISABLED; |
97 | disable_irq_nosync(dev->irq); | 97 | disable_irq_nosync(dev->irq); |
98 | } | 98 | } |
99 | 99 | ||
@@ -108,36 +108,43 @@ static void eeh_enable_irq(struct pci_dev *dev) | |||
108 | { | 108 | { |
109 | struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); | 109 | struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); |
110 | 110 | ||
111 | if ((edev->mode) & EEH_MODE_IRQ_DISABLED) { | 111 | if ((edev->mode) & EEH_DEV_IRQ_DISABLED) { |
112 | edev->mode &= ~EEH_MODE_IRQ_DISABLED; | 112 | edev->mode &= ~EEH_DEV_IRQ_DISABLED; |
113 | enable_irq(dev->irq); | 113 | enable_irq(dev->irq); |
114 | } | 114 | } |
115 | } | 115 | } |
116 | 116 | ||
117 | /** | 117 | /** |
118 | * eeh_report_error - Report pci error to each device driver | 118 | * eeh_report_error - Report pci error to each device driver |
119 | * @dev: PCI device | 119 | * @data: eeh device |
120 | * @userdata: return value | 120 | * @userdata: return value |
121 | * | 121 | * |
122 | * Report an EEH error to each device driver, collect up and | 122 | * Report an EEH error to each device driver, collect up and |
123 | * merge the device driver responses. Cumulative response | 123 | * merge the device driver responses. Cumulative response |
124 | * passed back in "userdata". | 124 | * passed back in "userdata". |
125 | */ | 125 | */ |
126 | static int eeh_report_error(struct pci_dev *dev, void *userdata) | 126 | static void *eeh_report_error(void *data, void *userdata) |
127 | { | 127 | { |
128 | struct eeh_dev *edev = (struct eeh_dev *)data; | ||
129 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); | ||
128 | enum pci_ers_result rc, *res = userdata; | 130 | enum pci_ers_result rc, *res = userdata; |
129 | struct pci_driver *driver = dev->driver; | 131 | struct pci_driver *driver = dev->driver; |
130 | 132 | ||
133 | /* We might not have the associated PCI device, | ||
134 | * then we should continue for next one. | ||
135 | */ | ||
136 | if (!dev) return NULL; | ||
137 | |||
131 | dev->error_state = pci_channel_io_frozen; | 138 | dev->error_state = pci_channel_io_frozen; |
132 | 139 | ||
133 | if (!driver) | 140 | if (!driver) |
134 | return 0; | 141 | return NULL; |
135 | 142 | ||
136 | eeh_disable_irq(dev); | 143 | eeh_disable_irq(dev); |
137 | 144 | ||
138 | if (!driver->err_handler || | 145 | if (!driver->err_handler || |
139 | !driver->err_handler->error_detected) | 146 | !driver->err_handler->error_detected) |
140 | return 0; | 147 | return NULL; |
141 | 148 | ||
142 | rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); | 149 | rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); |
143 | 150 | ||
@@ -145,27 +152,31 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata) | |||
145 | if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; | 152 | if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; |
146 | if (*res == PCI_ERS_RESULT_NONE) *res = rc; | 153 | if (*res == PCI_ERS_RESULT_NONE) *res = rc; |
147 | 154 | ||
148 | return 0; | 155 | return NULL; |
149 | } | 156 | } |
150 | 157 | ||
151 | /** | 158 | /** |
152 | * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled | 159 | * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled |
153 | * @dev: PCI device | 160 | * @data: eeh device |
154 | * @userdata: return value | 161 | * @userdata: return value |
155 | * | 162 | * |
156 | * Tells each device driver that IO ports, MMIO and config space I/O | 163 | * Tells each device driver that IO ports, MMIO and config space I/O |
157 | * are now enabled. Collects up and merges the device driver responses. | 164 | * are now enabled. Collects up and merges the device driver responses. |
158 | * Cumulative response passed back in "userdata". | 165 | * Cumulative response passed back in "userdata". |
159 | */ | 166 | */ |
160 | static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) | 167 | static void *eeh_report_mmio_enabled(void *data, void *userdata) |
161 | { | 168 | { |
169 | struct eeh_dev *edev = (struct eeh_dev *)data; | ||
170 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); | ||
162 | enum pci_ers_result rc, *res = userdata; | 171 | enum pci_ers_result rc, *res = userdata; |
163 | struct pci_driver *driver = dev->driver; | 172 | struct pci_driver *driver; |
164 | 173 | ||
165 | if (!driver || | 174 | if (!dev) return NULL; |
175 | |||
176 | if (!(driver = dev->driver) || | ||
166 | !driver->err_handler || | 177 | !driver->err_handler || |
167 | !driver->err_handler->mmio_enabled) | 178 | !driver->err_handler->mmio_enabled) |
168 | return 0; | 179 | return NULL; |
169 | 180 | ||
170 | rc = driver->err_handler->mmio_enabled(dev); | 181 | rc = driver->err_handler->mmio_enabled(dev); |
171 | 182 | ||
@@ -173,12 +184,12 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) | |||
173 | if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; | 184 | if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; |
174 | if (*res == PCI_ERS_RESULT_NONE) *res = rc; | 185 | if (*res == PCI_ERS_RESULT_NONE) *res = rc; |
175 | 186 | ||
176 | return 0; | 187 | return NULL; |
177 | } | 188 | } |
178 | 189 | ||
179 | /** | 190 | /** |
180 | * eeh_report_reset - Tell device that slot has been reset | 191 | * eeh_report_reset - Tell device that slot has been reset |
181 | * @dev: PCI device | 192 | * @data: eeh device |
182 | * @userdata: return value | 193 | * @userdata: return value |
183 | * | 194 | * |
184 | * This routine must be called while EEH tries to reset particular | 195 | * This routine must be called while EEH tries to reset particular |
@@ -186,13 +197,15 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) | |||
186 | * some actions, usually to save data the driver needs so that the | 197 | * some actions, usually to save data the driver needs so that the |
187 | * driver can work again while the device is recovered. | 198 | * driver can work again while the device is recovered. |
188 | */ | 199 | */ |
189 | static int eeh_report_reset(struct pci_dev *dev, void *userdata) | 200 | static void *eeh_report_reset(void *data, void *userdata) |
190 | { | 201 | { |
202 | struct eeh_dev *edev = (struct eeh_dev *)data; | ||
203 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); | ||
191 | enum pci_ers_result rc, *res = userdata; | 204 | enum pci_ers_result rc, *res = userdata; |
192 | struct pci_driver *driver = dev->driver; | 205 | struct pci_driver *driver; |
193 | 206 | ||
194 | if (!driver) | 207 | if (!dev || !(driver = dev->driver)) |
195 | return 0; | 208 | return NULL; |
196 | 209 | ||
197 | dev->error_state = pci_channel_io_normal; | 210 | dev->error_state = pci_channel_io_normal; |
198 | 211 | ||
@@ -200,7 +213,7 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata) | |||
200 | 213 | ||
201 | if (!driver->err_handler || | 214 | if (!driver->err_handler || |
202 | !driver->err_handler->slot_reset) | 215 | !driver->err_handler->slot_reset) |
203 | return 0; | 216 | return NULL; |
204 | 217 | ||
205 | rc = driver->err_handler->slot_reset(dev); | 218 | rc = driver->err_handler->slot_reset(dev); |
206 | if ((*res == PCI_ERS_RESULT_NONE) || | 219 | if ((*res == PCI_ERS_RESULT_NONE) || |
@@ -208,82 +221,89 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata) | |||
208 | if (*res == PCI_ERS_RESULT_DISCONNECT && | 221 | if (*res == PCI_ERS_RESULT_DISCONNECT && |
209 | rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; | 222 | rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; |
210 | 223 | ||
211 | return 0; | 224 | return NULL; |
212 | } | 225 | } |
213 | 226 | ||
214 | /** | 227 | /** |
215 | * eeh_report_resume - Tell device to resume normal operations | 228 | * eeh_report_resume - Tell device to resume normal operations |
216 | * @dev: PCI device | 229 | * @data: eeh device |
217 | * @userdata: return value | 230 | * @userdata: return value |
218 | * | 231 | * |
219 | * This routine must be called to notify the device driver that it | 232 | * This routine must be called to notify the device driver that it |
220 | * could resume so that the device driver can do some initialization | 233 | * could resume so that the device driver can do some initialization |
221 | * to make the recovered device work again. | 234 | * to make the recovered device work again. |
222 | */ | 235 | */ |
223 | static int eeh_report_resume(struct pci_dev *dev, void *userdata) | 236 | static void *eeh_report_resume(void *data, void *userdata) |
224 | { | 237 | { |
225 | struct pci_driver *driver = dev->driver; | 238 | struct eeh_dev *edev = (struct eeh_dev *)data; |
239 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); | ||
240 | struct pci_driver *driver; | ||
241 | |||
242 | if (!dev) return NULL; | ||
226 | 243 | ||
227 | dev->error_state = pci_channel_io_normal; | 244 | dev->error_state = pci_channel_io_normal; |
228 | 245 | ||
229 | if (!driver) | 246 | if (!(driver = dev->driver)) |
230 | return 0; | 247 | return NULL; |
231 | 248 | ||
232 | eeh_enable_irq(dev); | 249 | eeh_enable_irq(dev); |
233 | 250 | ||
234 | if (!driver->err_handler || | 251 | if (!driver->err_handler || |
235 | !driver->err_handler->resume) | 252 | !driver->err_handler->resume) |
236 | return 0; | 253 | return NULL; |
237 | 254 | ||
238 | driver->err_handler->resume(dev); | 255 | driver->err_handler->resume(dev); |
239 | 256 | ||
240 | return 0; | 257 | return NULL; |
241 | } | 258 | } |
242 | 259 | ||
243 | /** | 260 | /** |
244 | * eeh_report_failure - Tell device driver that device is dead. | 261 | * eeh_report_failure - Tell device driver that device is dead. |
245 | * @dev: PCI device | 262 | * @data: eeh device |
246 | * @userdata: return value | 263 | * @userdata: return value |
247 | * | 264 | * |
248 | * This informs the device driver that the device is permanently | 265 | * This informs the device driver that the device is permanently |
249 | * dead, and that no further recovery attempts will be made on it. | 266 | * dead, and that no further recovery attempts will be made on it. |
250 | */ | 267 | */ |
251 | static int eeh_report_failure(struct pci_dev *dev, void *userdata) | 268 | static void *eeh_report_failure(void *data, void *userdata) |
252 | { | 269 | { |
253 | struct pci_driver *driver = dev->driver; | 270 | struct eeh_dev *edev = (struct eeh_dev *)data; |
271 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); | ||
272 | struct pci_driver *driver; | ||
273 | |||
274 | if (!dev) return NULL; | ||
254 | 275 | ||
255 | dev->error_state = pci_channel_io_perm_failure; | 276 | dev->error_state = pci_channel_io_perm_failure; |
256 | 277 | ||
257 | if (!driver) | 278 | if (!(driver = dev->driver)) |
258 | return 0; | 279 | return NULL; |
259 | 280 | ||
260 | eeh_disable_irq(dev); | 281 | eeh_disable_irq(dev); |
261 | 282 | ||
262 | if (!driver->err_handler || | 283 | if (!driver->err_handler || |
263 | !driver->err_handler->error_detected) | 284 | !driver->err_handler->error_detected) |
264 | return 0; | 285 | return NULL; |
265 | 286 | ||
266 | driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); | 287 | driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); |
267 | 288 | ||
268 | return 0; | 289 | return NULL; |
269 | } | 290 | } |
270 | 291 | ||
271 | /** | 292 | /** |
272 | * eeh_reset_device - Perform actual reset of a pci slot | 293 | * eeh_reset_device - Perform actual reset of a pci slot |
273 | * @edev: PE associated EEH device | 294 | * @pe: EEH PE |
274 | * @bus: PCI bus corresponding to the isolcated slot | 295 | * @bus: PCI bus corresponding to the isolcated slot |
275 | * | 296 | * |
276 | * This routine must be called to do reset on the indicated PE. | 297 | * This routine must be called to do reset on the indicated PE. |
277 | * During the reset, udev might be invoked because those affected | 298 | * During the reset, udev might be invoked because those affected |
278 | * PCI devices will be removed and then added. | 299 | * PCI devices will be removed and then added. |
279 | */ | 300 | */ |
280 | static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus) | 301 | static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) |
281 | { | 302 | { |
282 | struct device_node *dn; | ||
283 | int cnt, rc; | 303 | int cnt, rc; |
284 | 304 | ||
285 | /* pcibios will clear the counter; save the value */ | 305 | /* pcibios will clear the counter; save the value */ |
286 | cnt = edev->freeze_count; | 306 | cnt = pe->freeze_count; |
287 | 307 | ||
288 | if (bus) | 308 | if (bus) |
289 | pcibios_remove_pci_devices(bus); | 309 | pcibios_remove_pci_devices(bus); |
@@ -292,25 +312,13 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus) | |||
292 | * Reconfigure bridges and devices. Don't try to bring the system | 312 | * Reconfigure bridges and devices. Don't try to bring the system |
293 | * up if the reset failed for some reason. | 313 | * up if the reset failed for some reason. |
294 | */ | 314 | */ |
295 | rc = eeh_reset_pe(edev); | 315 | rc = eeh_reset_pe(pe); |
296 | if (rc) | 316 | if (rc) |
297 | return rc; | 317 | return rc; |
298 | 318 | ||
299 | /* Walk over all functions on this device. */ | 319 | /* Restore PE */ |
300 | dn = eeh_dev_to_of_node(edev); | 320 | eeh_ops->configure_bridge(pe); |
301 | if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) | 321 | eeh_pe_restore_bars(pe); |
302 | dn = dn->parent->child; | ||
303 | |||
304 | while (dn) { | ||
305 | struct eeh_dev *pedev = of_node_to_eeh_dev(dn); | ||
306 | |||
307 | /* On Power4, always true because eeh_pe_config_addr=0 */ | ||
308 | if (edev->pe_config_addr == pedev->pe_config_addr) { | ||
309 | eeh_ops->configure_bridge(dn); | ||
310 | eeh_restore_bars(pedev); | ||
311 | } | ||
312 | dn = dn->sibling; | ||
313 | } | ||
314 | 322 | ||
315 | /* Give the system 5 seconds to finish running the user-space | 323 | /* Give the system 5 seconds to finish running the user-space |
316 | * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, | 324 | * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, |
@@ -322,7 +330,7 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus) | |||
322 | ssleep(5); | 330 | ssleep(5); |
323 | pcibios_add_pci_devices(bus); | 331 | pcibios_add_pci_devices(bus); |
324 | } | 332 | } |
325 | edev->freeze_count = cnt; | 333 | pe->freeze_count = cnt; |
326 | 334 | ||
327 | return 0; | 335 | return 0; |
328 | } | 336 | } |
@@ -334,7 +342,7 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus) | |||
334 | 342 | ||
335 | /** | 343 | /** |
336 | * eeh_handle_event - Reset a PCI device after hard lockup. | 344 | * eeh_handle_event - Reset a PCI device after hard lockup. |
337 | * @event: EEH event | 345 | * @pe: EEH PE |
338 | * | 346 | * |
339 | * While PHB detects address or data parity errors on particular PCI | 347 | * While PHB detects address or data parity errors on particular PCI |
340 | * slot, the associated PE will be frozen. Besides, DMA's occurring | 348 | * slot, the associated PE will be frozen. Besides, DMA's occurring |
@@ -349,69 +357,24 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus) | |||
349 | * drivers (which cause a second set of hotplug events to go out to | 357 | * drivers (which cause a second set of hotplug events to go out to |
350 | * userspace). | 358 | * userspace). |
351 | */ | 359 | */ |
352 | struct eeh_dev *handle_eeh_events(struct eeh_event *event) | 360 | void eeh_handle_event(struct eeh_pe *pe) |
353 | { | 361 | { |
354 | struct device_node *frozen_dn; | ||
355 | struct eeh_dev *frozen_edev; | ||
356 | struct pci_bus *frozen_bus; | 362 | struct pci_bus *frozen_bus; |
357 | int rc = 0; | 363 | int rc = 0; |
358 | enum pci_ers_result result = PCI_ERS_RESULT_NONE; | 364 | enum pci_ers_result result = PCI_ERS_RESULT_NONE; |
359 | const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str; | ||
360 | |||
361 | frozen_dn = eeh_find_device_pe(eeh_dev_to_of_node(event->edev)); | ||
362 | if (!frozen_dn) { | ||
363 | location = of_get_property(eeh_dev_to_of_node(event->edev), "ibm,loc-code", NULL); | ||
364 | location = location ? location : "unknown"; | ||
365 | printk(KERN_ERR "EEH: Error: Cannot find partition endpoint " | ||
366 | "for location=%s pci addr=%s\n", | ||
367 | location, eeh_pci_name(eeh_dev_to_pci_dev(event->edev))); | ||
368 | return NULL; | ||
369 | } | ||
370 | |||
371 | frozen_bus = pcibios_find_pci_bus(frozen_dn); | ||
372 | location = of_get_property(frozen_dn, "ibm,loc-code", NULL); | ||
373 | location = location ? location : "unknown"; | ||
374 | |||
375 | /* There are two different styles for coming up with the PE. | ||
376 | * In the old style, it was the highest EEH-capable device | ||
377 | * which was always an EADS pci bridge. In the new style, | ||
378 | * there might not be any EADS bridges, and even when there are, | ||
379 | * the firmware marks them as "EEH incapable". So another | ||
380 | * two-step is needed to find the pci bus.. | ||
381 | */ | ||
382 | if (!frozen_bus) | ||
383 | frozen_bus = pcibios_find_pci_bus(frozen_dn->parent); | ||
384 | 365 | ||
366 | frozen_bus = eeh_pe_bus_get(pe); | ||
385 | if (!frozen_bus) { | 367 | if (!frozen_bus) { |
386 | printk(KERN_ERR "EEH: Cannot find PCI bus " | 368 | pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", |
387 | "for location=%s dn=%s\n", | 369 | __func__, pe->phb->global_number, pe->addr); |
388 | location, frozen_dn->full_name); | 370 | return; |
389 | return NULL; | ||
390 | } | 371 | } |
391 | 372 | ||
392 | frozen_edev = of_node_to_eeh_dev(frozen_dn); | 373 | pe->freeze_count++; |
393 | frozen_edev->freeze_count++; | 374 | if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) |
394 | pci_str = eeh_pci_name(eeh_dev_to_pci_dev(event->edev)); | ||
395 | drv_str = eeh_pcid_name(eeh_dev_to_pci_dev(event->edev)); | ||
396 | |||
397 | if (frozen_edev->freeze_count > EEH_MAX_ALLOWED_FREEZES) | ||
398 | goto excess_failures; | 375 | goto excess_failures; |
399 | 376 | pr_warning("EEH: This PCI device has failed %d times in the last hour\n", | |
400 | printk(KERN_WARNING | 377 | pe->freeze_count); |
401 | "EEH: This PCI device has failed %d times in the last hour:\n", | ||
402 | frozen_edev->freeze_count); | ||
403 | |||
404 | if (frozen_edev->pdev) { | ||
405 | bus_pci_str = pci_name(frozen_edev->pdev); | ||
406 | bus_drv_str = eeh_pcid_name(frozen_edev->pdev); | ||
407 | printk(KERN_WARNING | ||
408 | "EEH: Bus location=%s driver=%s pci addr=%s\n", | ||
409 | location, bus_drv_str, bus_pci_str); | ||
410 | } | ||
411 | |||
412 | printk(KERN_WARNING | ||
413 | "EEH: Device location=%s driver=%s pci addr=%s\n", | ||
414 | location, drv_str, pci_str); | ||
415 | 378 | ||
416 | /* Walk the various device drivers attached to this slot through | 379 | /* Walk the various device drivers attached to this slot through |
417 | * a reset sequence, giving each an opportunity to do what it needs | 380 | * a reset sequence, giving each an opportunity to do what it needs |
@@ -419,12 +382,12 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event) | |||
419 | * status ... if any child can't handle the reset, then the entire | 382 | * status ... if any child can't handle the reset, then the entire |
420 | * slot is dlpar removed and added. | 383 | * slot is dlpar removed and added. |
421 | */ | 384 | */ |
422 | pci_walk_bus(frozen_bus, eeh_report_error, &result); | 385 | eeh_pe_dev_traverse(pe, eeh_report_error, &result); |
423 | 386 | ||
424 | /* Get the current PCI slot state. This can take a long time, | 387 | /* Get the current PCI slot state. This can take a long time, |
425 | * sometimes over 3 seconds for certain systems. | 388 | * sometimes over 3 seconds for certain systems. |
426 | */ | 389 | */ |
427 | rc = eeh_ops->wait_state(eeh_dev_to_of_node(frozen_edev), MAX_WAIT_FOR_RECOVERY*1000); | 390 | rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000); |
428 | if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { | 391 | if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { |
429 | printk(KERN_WARNING "EEH: Permanent failure\n"); | 392 | printk(KERN_WARNING "EEH: Permanent failure\n"); |
430 | goto hard_fail; | 393 | goto hard_fail; |
@@ -434,14 +397,14 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event) | |||
434 | * don't post the error log until after all dev drivers | 397 | * don't post the error log until after all dev drivers |
435 | * have been informed. | 398 | * have been informed. |
436 | */ | 399 | */ |
437 | eeh_slot_error_detail(frozen_edev, EEH_LOG_TEMP); | 400 | eeh_slot_error_detail(pe, EEH_LOG_TEMP); |
438 | 401 | ||
439 | /* If all device drivers were EEH-unaware, then shut | 402 | /* If all device drivers were EEH-unaware, then shut |
440 | * down all of the device drivers, and hope they | 403 | * down all of the device drivers, and hope they |
441 | * go down willingly, without panicing the system. | 404 | * go down willingly, without panicing the system. |
442 | */ | 405 | */ |
443 | if (result == PCI_ERS_RESULT_NONE) { | 406 | if (result == PCI_ERS_RESULT_NONE) { |
444 | rc = eeh_reset_device(frozen_edev, frozen_bus); | 407 | rc = eeh_reset_device(pe, frozen_bus); |
445 | if (rc) { | 408 | if (rc) { |
446 | printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc); | 409 | printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc); |
447 | goto hard_fail; | 410 | goto hard_fail; |
@@ -450,7 +413,7 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event) | |||
450 | 413 | ||
451 | /* If all devices reported they can proceed, then re-enable MMIO */ | 414 | /* If all devices reported they can proceed, then re-enable MMIO */ |
452 | if (result == PCI_ERS_RESULT_CAN_RECOVER) { | 415 | if (result == PCI_ERS_RESULT_CAN_RECOVER) { |
453 | rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_MMIO); | 416 | rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); |
454 | 417 | ||
455 | if (rc < 0) | 418 | if (rc < 0) |
456 | goto hard_fail; | 419 | goto hard_fail; |
@@ -458,13 +421,13 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event) | |||
458 | result = PCI_ERS_RESULT_NEED_RESET; | 421 | result = PCI_ERS_RESULT_NEED_RESET; |
459 | } else { | 422 | } else { |
460 | result = PCI_ERS_RESULT_NONE; | 423 | result = PCI_ERS_RESULT_NONE; |
461 | pci_walk_bus(frozen_bus, eeh_report_mmio_enabled, &result); | 424 | eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result); |
462 | } | 425 | } |
463 | } | 426 | } |
464 | 427 | ||
465 | /* If all devices reported they can proceed, then re-enable DMA */ | 428 | /* If all devices reported they can proceed, then re-enable DMA */ |
466 | if (result == PCI_ERS_RESULT_CAN_RECOVER) { | 429 | if (result == PCI_ERS_RESULT_CAN_RECOVER) { |
467 | rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_DMA); | 430 | rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); |
468 | 431 | ||
469 | if (rc < 0) | 432 | if (rc < 0) |
470 | goto hard_fail; | 433 | goto hard_fail; |
@@ -482,13 +445,13 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event) | |||
482 | 445 | ||
483 | /* If any device called out for a reset, then reset the slot */ | 446 | /* If any device called out for a reset, then reset the slot */ |
484 | if (result == PCI_ERS_RESULT_NEED_RESET) { | 447 | if (result == PCI_ERS_RESULT_NEED_RESET) { |
485 | rc = eeh_reset_device(frozen_edev, NULL); | 448 | rc = eeh_reset_device(pe, NULL); |
486 | if (rc) { | 449 | if (rc) { |
487 | printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc); | 450 | printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc); |
488 | goto hard_fail; | 451 | goto hard_fail; |
489 | } | 452 | } |
490 | result = PCI_ERS_RESULT_NONE; | 453 | result = PCI_ERS_RESULT_NONE; |
491 | pci_walk_bus(frozen_bus, eeh_report_reset, &result); | 454 | eeh_pe_dev_traverse(pe, eeh_report_reset, &result); |
492 | } | 455 | } |
493 | 456 | ||
494 | /* All devices should claim they have recovered by now. */ | 457 | /* All devices should claim they have recovered by now. */ |
@@ -499,9 +462,9 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event) | |||
499 | } | 462 | } |
500 | 463 | ||
501 | /* Tell all device drivers that they can resume operations */ | 464 | /* Tell all device drivers that they can resume operations */ |
502 | pci_walk_bus(frozen_bus, eeh_report_resume, NULL); | 465 | eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); |
503 | 466 | ||
504 | return frozen_edev; | 467 | return; |
505 | 468 | ||
506 | excess_failures: | 469 | excess_failures: |
507 | /* | 470 | /* |
@@ -509,30 +472,26 @@ excess_failures: | |||
509 | * are due to poorly seated PCI cards. Only 10% or so are | 472 | * are due to poorly seated PCI cards. Only 10% or so are |
510 | * due to actual, failed cards. | 473 | * due to actual, failed cards. |
511 | */ | 474 | */ |
512 | printk(KERN_ERR | 475 | pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n" |
513 | "EEH: PCI device at location=%s driver=%s pci addr=%s\n" | 476 | "last hour and has been permanently disabled.\n" |
514 | "has failed %d times in the last hour " | 477 | "Please try reseating or replacing it.\n", |
515 | "and has been permanently disabled.\n" | 478 | pe->phb->global_number, pe->addr, |
516 | "Please try reseating this device or replacing it.\n", | 479 | pe->freeze_count); |
517 | location, drv_str, pci_str, frozen_edev->freeze_count); | ||
518 | goto perm_error; | 480 | goto perm_error; |
519 | 481 | ||
520 | hard_fail: | 482 | hard_fail: |
521 | printk(KERN_ERR | 483 | pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n" |
522 | "EEH: Unable to recover from failure of PCI device " | 484 | "Please try reseating or replacing it\n", |
523 | "at location=%s driver=%s pci addr=%s\n" | 485 | pe->phb->global_number, pe->addr); |
524 | "Please try reseating this device or replacing it.\n", | ||
525 | location, drv_str, pci_str); | ||
526 | 486 | ||
527 | perm_error: | 487 | perm_error: |
528 | eeh_slot_error_detail(frozen_edev, EEH_LOG_PERM); | 488 | eeh_slot_error_detail(pe, EEH_LOG_PERM); |
529 | 489 | ||
530 | /* Notify all devices that they're about to go down. */ | 490 | /* Notify all devices that they're about to go down. */ |
531 | pci_walk_bus(frozen_bus, eeh_report_failure, NULL); | 491 | eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); |
532 | 492 | ||
533 | /* Shut down the device drivers for good. */ | 493 | /* Shut down the device drivers for good. */ |
534 | pcibios_remove_pci_devices(frozen_bus); | 494 | if (frozen_bus) |
535 | 495 | pcibios_remove_pci_devices(frozen_bus); | |
536 | return NULL; | ||
537 | } | 496 | } |
538 | 497 | ||
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c index fb506317ebb0..51faaac8abe6 100644 --- a/arch/powerpc/platforms/pseries/eeh_event.c +++ b/arch/powerpc/platforms/pseries/eeh_event.c | |||
@@ -57,7 +57,7 @@ static int eeh_event_handler(void * dummy) | |||
57 | { | 57 | { |
58 | unsigned long flags; | 58 | unsigned long flags; |
59 | struct eeh_event *event; | 59 | struct eeh_event *event; |
60 | struct eeh_dev *edev; | 60 | struct eeh_pe *pe; |
61 | 61 | ||
62 | set_task_comm(current, "eehd"); | 62 | set_task_comm(current, "eehd"); |
63 | 63 | ||
@@ -76,28 +76,23 @@ static int eeh_event_handler(void * dummy) | |||
76 | 76 | ||
77 | /* Serialize processing of EEH events */ | 77 | /* Serialize processing of EEH events */ |
78 | mutex_lock(&eeh_event_mutex); | 78 | mutex_lock(&eeh_event_mutex); |
79 | edev = event->edev; | 79 | pe = event->pe; |
80 | eeh_mark_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING); | 80 | eeh_pe_state_mark(pe, EEH_PE_RECOVERING); |
81 | 81 | pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n", | |
82 | printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", | 82 | pe->phb->global_number, pe->addr); |
83 | eeh_pci_name(edev->pdev)); | ||
84 | 83 | ||
85 | set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */ | 84 | set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */ |
86 | edev = handle_eeh_events(event); | 85 | eeh_handle_event(pe); |
87 | 86 | eeh_pe_state_clear(pe, EEH_PE_RECOVERING); | |
88 | if (edev) { | ||
89 | eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING); | ||
90 | pci_dev_put(edev->pdev); | ||
91 | } | ||
92 | 87 | ||
93 | kfree(event); | 88 | kfree(event); |
94 | mutex_unlock(&eeh_event_mutex); | 89 | mutex_unlock(&eeh_event_mutex); |
95 | 90 | ||
96 | /* If there are no new errors after an hour, clear the counter. */ | 91 | /* If there are no new errors after an hour, clear the counter. */ |
97 | if (edev && edev->freeze_count>0) { | 92 | if (pe && pe->freeze_count > 0) { |
98 | msleep_interruptible(3600*1000); | 93 | msleep_interruptible(3600*1000); |
99 | if (edev->freeze_count>0) | 94 | if (pe->freeze_count > 0) |
100 | edev->freeze_count--; | 95 | pe->freeze_count--; |
101 | 96 | ||
102 | } | 97 | } |
103 | 98 | ||
@@ -119,36 +114,23 @@ static void eeh_thread_launcher(struct work_struct *dummy) | |||
119 | 114 | ||
120 | /** | 115 | /** |
121 | * eeh_send_failure_event - Generate a PCI error event | 116 | * eeh_send_failure_event - Generate a PCI error event |
122 | * @edev: EEH device | 117 | * @pe: EEH PE |
123 | * | 118 | * |
124 | * This routine can be called within an interrupt context; | 119 | * This routine can be called within an interrupt context; |
125 | * the actual event will be delivered in a normal context | 120 | * the actual event will be delivered in a normal context |
126 | * (from a workqueue). | 121 | * (from a workqueue). |
127 | */ | 122 | */ |
128 | int eeh_send_failure_event(struct eeh_dev *edev) | 123 | int eeh_send_failure_event(struct eeh_pe *pe) |
129 | { | 124 | { |
130 | unsigned long flags; | 125 | unsigned long flags; |
131 | struct eeh_event *event; | 126 | struct eeh_event *event; |
132 | struct device_node *dn = eeh_dev_to_of_node(edev); | ||
133 | const char *location; | ||
134 | |||
135 | if (!mem_init_done) { | ||
136 | printk(KERN_ERR "EEH: event during early boot not handled\n"); | ||
137 | location = of_get_property(dn, "ibm,loc-code", NULL); | ||
138 | printk(KERN_ERR "EEH: device node = %s\n", dn->full_name); | ||
139 | printk(KERN_ERR "EEH: PCI location = %s\n", location); | ||
140 | return 1; | ||
141 | } | ||
142 | event = kmalloc(sizeof(*event), GFP_ATOMIC); | ||
143 | if (event == NULL) { | ||
144 | printk(KERN_ERR "EEH: out of memory, event not handled\n"); | ||
145 | return 1; | ||
146 | } | ||
147 | |||
148 | if (edev->pdev) | ||
149 | pci_dev_get(edev->pdev); | ||
150 | 127 | ||
151 | event->edev = edev; | 128 | event = kzalloc(sizeof(*event), GFP_ATOMIC); |
129 | if (!event) { | ||
130 | pr_err("EEH: out of memory, event not handled\n"); | ||
131 | return -ENOMEM; | ||
132 | } | ||
133 | event->pe = pe; | ||
152 | 134 | ||
153 | /* We may or may not be called in an interrupt context */ | 135 | /* We may or may not be called in an interrupt context */ |
154 | spin_lock_irqsave(&eeh_eventlist_lock, flags); | 136 | spin_lock_irqsave(&eeh_eventlist_lock, flags); |
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c new file mode 100644 index 000000000000..904123c7657b --- /dev/null +++ b/arch/powerpc/platforms/pseries/eeh_pe.c | |||
@@ -0,0 +1,591 @@ | |||
1 | /* | ||
2 | * The file intends to implement PE based on the information from | ||
3 | * platforms. Basically, there have 3 types of PEs: PHB/Bus/Device. | ||
4 | * All the PEs should be organized as hierarchy tree. The first level | ||
5 | * of the tree will be associated to existing PHBs since the particular | ||
6 | * PE is only meaningful in one PHB domain. | ||
7 | * | ||
8 | * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
23 | */ | ||
24 | |||
25 | #include <linux/export.h> | ||
26 | #include <linux/gfp.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/pci.h> | ||
30 | #include <linux/string.h> | ||
31 | |||
32 | #include <asm/pci-bridge.h> | ||
33 | #include <asm/ppc-pci.h> | ||
34 | |||
35 | static LIST_HEAD(eeh_phb_pe); | ||
36 | |||
37 | /** | ||
38 | * eeh_pe_alloc - Allocate PE | ||
39 | * @phb: PCI controller | ||
40 | * @type: PE type | ||
41 | * | ||
42 | * Allocate PE instance dynamically. | ||
43 | */ | ||
44 | static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type) | ||
45 | { | ||
46 | struct eeh_pe *pe; | ||
47 | |||
48 | /* Allocate PHB PE */ | ||
49 | pe = kzalloc(sizeof(struct eeh_pe), GFP_KERNEL); | ||
50 | if (!pe) return NULL; | ||
51 | |||
52 | /* Initialize PHB PE */ | ||
53 | pe->type = type; | ||
54 | pe->phb = phb; | ||
55 | INIT_LIST_HEAD(&pe->child_list); | ||
56 | INIT_LIST_HEAD(&pe->child); | ||
57 | INIT_LIST_HEAD(&pe->edevs); | ||
58 | |||
59 | return pe; | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * eeh_phb_pe_create - Create PHB PE | ||
64 | * @phb: PCI controller | ||
65 | * | ||
66 | * The function should be called while the PHB is detected during | ||
67 | * system boot or PCI hotplug in order to create PHB PE. | ||
68 | */ | ||
69 | int __devinit eeh_phb_pe_create(struct pci_controller *phb) | ||
70 | { | ||
71 | struct eeh_pe *pe; | ||
72 | |||
73 | /* Allocate PHB PE */ | ||
74 | pe = eeh_pe_alloc(phb, EEH_PE_PHB); | ||
75 | if (!pe) { | ||
76 | pr_err("%s: out of memory!\n", __func__); | ||
77 | return -ENOMEM; | ||
78 | } | ||
79 | |||
80 | /* Put it into the list */ | ||
81 | eeh_lock(); | ||
82 | list_add_tail(&pe->child, &eeh_phb_pe); | ||
83 | eeh_unlock(); | ||
84 | |||
85 | pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB | ||
92 | * @phb: PCI controller | ||
93 | * | ||
94 | * The overall PEs form hierarchy tree. The first layer of the | ||
95 | * hierarchy tree is composed of PHB PEs. The function is used | ||
96 | * to retrieve the corresponding PHB PE according to the given PHB. | ||
97 | */ | ||
98 | static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb) | ||
99 | { | ||
100 | struct eeh_pe *pe; | ||
101 | |||
102 | eeh_lock(); | ||
103 | |||
104 | list_for_each_entry(pe, &eeh_phb_pe, child) { | ||
105 | /* | ||
106 | * Actually, we needn't check the type since | ||
107 | * the PE for PHB has been determined when that | ||
108 | * was created. | ||
109 | */ | ||
110 | if (pe->type == EEH_PE_PHB && | ||
111 | pe->phb == phb) { | ||
112 | eeh_unlock(); | ||
113 | return pe; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | eeh_unlock(); | ||
118 | |||
119 | return NULL; | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * eeh_pe_next - Retrieve the next PE in the tree | ||
124 | * @pe: current PE | ||
125 | * @root: root PE | ||
126 | * | ||
127 | * The function is used to retrieve the next PE in the | ||
128 | * hierarchy PE tree. | ||
129 | */ | ||
static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
		struct eeh_pe *root)
{
	/* Pre-order step: try to descend to the first child */
	struct list_head *next = pe->child_list.next;

	if (next == &pe->child_list) {
		/*
		 * No children: climb towards @root until a node with an
		 * unvisited sibling is found.  Reaching @root means the
		 * whole subtree has been visited.
		 */
		while (1) {
			if (pe == root)
				return NULL;
			next = pe->child.next;
			/* A sibling exists if we are not back at the
			 * parent's list head */
			if (next != &pe->parent->child_list)
				break;
			pe = pe->parent;
		}
	}

	return list_entry(next, struct eeh_pe, child);
}
148 | |||
149 | /** | ||
150 | * eeh_pe_traverse - Traverse PEs in the specified PHB | ||
151 | * @root: root PE | ||
152 | * @fn: callback | ||
153 | * @flag: extra parameter to callback | ||
154 | * | ||
155 | * The function is used to traverse the specified PE and its | ||
156 | * child PEs. The traversing is to be terminated once the | ||
157 | * callback returns something other than NULL, or no more PEs | ||
158 | * to be traversed. | ||
159 | */ | ||
160 | static void *eeh_pe_traverse(struct eeh_pe *root, | ||
161 | eeh_traverse_func fn, void *flag) | ||
162 | { | ||
163 | struct eeh_pe *pe; | ||
164 | void *ret; | ||
165 | |||
166 | for (pe = root; pe; pe = eeh_pe_next(pe, root)) { | ||
167 | ret = fn(pe, flag); | ||
168 | if (ret) return ret; | ||
169 | } | ||
170 | |||
171 | return NULL; | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * eeh_pe_dev_traverse - Traverse the devices from the PE | ||
176 | * @root: EEH PE | ||
177 | * @fn: function callback | ||
178 | * @flag: extra parameter to callback | ||
179 | * | ||
180 | * The function is used to traverse the devices of the specified | ||
181 | * PE and its child PEs. | ||
182 | */ | ||
183 | void *eeh_pe_dev_traverse(struct eeh_pe *root, | ||
184 | eeh_traverse_func fn, void *flag) | ||
185 | { | ||
186 | struct eeh_pe *pe; | ||
187 | struct eeh_dev *edev; | ||
188 | void *ret; | ||
189 | |||
190 | if (!root) { | ||
191 | pr_warning("%s: Invalid PE %p\n", __func__, root); | ||
192 | return NULL; | ||
193 | } | ||
194 | |||
195 | /* Traverse root PE */ | ||
196 | for (pe = root; pe; pe = eeh_pe_next(pe, root)) { | ||
197 | eeh_pe_for_each_dev(pe, edev) { | ||
198 | ret = fn(edev, flag); | ||
199 | if (ret) return ret; | ||
200 | } | ||
201 | } | ||
202 | |||
203 | return NULL; | ||
204 | } | ||
205 | |||
206 | /** | ||
207 | * __eeh_pe_get - Check the PE address | ||
208 | * @data: EEH PE | ||
209 | * @flag: EEH device | ||
210 | * | ||
211 | * For one particular PE, it can be identified by PE address | ||
212 | * or tranditional BDF address. BDF address is composed of | ||
213 | * Bus/Device/Function number. The extra data referred by flag | ||
214 | * indicates which type of address should be used. | ||
215 | */ | ||
216 | static void *__eeh_pe_get(void *data, void *flag) | ||
217 | { | ||
218 | struct eeh_pe *pe = (struct eeh_pe *)data; | ||
219 | struct eeh_dev *edev = (struct eeh_dev *)flag; | ||
220 | |||
221 | /* Unexpected PHB PE */ | ||
222 | if (pe->type == EEH_PE_PHB) | ||
223 | return NULL; | ||
224 | |||
225 | /* We prefer PE address */ | ||
226 | if (edev->pe_config_addr && | ||
227 | (edev->pe_config_addr == pe->addr)) | ||
228 | return pe; | ||
229 | |||
230 | /* Try BDF address */ | ||
231 | if (edev->pe_config_addr && | ||
232 | (edev->config_addr == pe->config_addr)) | ||
233 | return pe; | ||
234 | |||
235 | return NULL; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * eeh_pe_get - Search PE based on the given address | ||
240 | * @edev: EEH device | ||
241 | * | ||
242 | * Search the corresponding PE based on the specified address which | ||
243 | * is included in the eeh device. The function is used to check if | ||
244 | * the associated PE has been created against the PE address. It's | ||
245 | * notable that the PE address has 2 format: traditional PE address | ||
246 | * which is composed of PCI bus/device/function number, or unified | ||
247 | * PE address. | ||
248 | */ | ||
249 | static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev) | ||
250 | { | ||
251 | struct eeh_pe *root = eeh_phb_pe_get(edev->phb); | ||
252 | struct eeh_pe *pe; | ||
253 | |||
254 | eeh_lock(); | ||
255 | pe = eeh_pe_traverse(root, __eeh_pe_get, edev); | ||
256 | eeh_unlock(); | ||
257 | |||
258 | return pe; | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * eeh_pe_get_parent - Retrieve the parent PE | ||
263 | * @edev: EEH device | ||
264 | * | ||
265 | * The whole PEs existing in the system are organized as hierarchy | ||
266 | * tree. The function is used to retrieve the parent PE according | ||
267 | * to the parent EEH device. | ||
268 | */ | ||
269 | static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) | ||
270 | { | ||
271 | struct device_node *dn; | ||
272 | struct eeh_dev *parent; | ||
273 | |||
274 | /* | ||
275 | * It might have the case for the indirect parent | ||
276 | * EEH device already having associated PE, but | ||
277 | * the direct parent EEH device doesn't have yet. | ||
278 | */ | ||
279 | dn = edev->dn->parent; | ||
280 | while (dn) { | ||
281 | /* We're poking out of PCI territory */ | ||
282 | if (!PCI_DN(dn)) return NULL; | ||
283 | |||
284 | parent = of_node_to_eeh_dev(dn); | ||
285 | /* We're poking out of PCI territory */ | ||
286 | if (!parent) return NULL; | ||
287 | |||
288 | if (parent->pe) | ||
289 | return parent->pe; | ||
290 | |||
291 | dn = dn->parent; | ||
292 | } | ||
293 | |||
294 | return NULL; | ||
295 | } | ||
296 | |||
297 | /** | ||
298 | * eeh_add_to_parent_pe - Add EEH device to parent PE | ||
299 | * @edev: EEH device | ||
300 | * | ||
301 | * Add EEH device to the parent PE. If the parent PE already | ||
302 | * exists, the PE type will be changed to EEH_PE_BUS. Otherwise, | ||
303 | * we have to create new PE to hold the EEH device and the new | ||
304 | * PE will be linked to its parent PE as well. | ||
305 | */ | ||
306 | int eeh_add_to_parent_pe(struct eeh_dev *edev) | ||
307 | { | ||
308 | struct eeh_pe *pe, *parent; | ||
309 | |||
310 | /* | ||
311 | * Search the PE has been existing or not according | ||
312 | * to the PE address. If that has been existing, the | ||
313 | * PE should be composed of PCI bus and its subordinate | ||
314 | * components. | ||
315 | */ | ||
316 | pe = eeh_pe_get(edev); | ||
317 | if (pe) { | ||
318 | if (!edev->pe_config_addr) { | ||
319 | pr_err("%s: PE with addr 0x%x already exists\n", | ||
320 | __func__, edev->config_addr); | ||
321 | return -EEXIST; | ||
322 | } | ||
323 | |||
324 | /* Mark the PE as type of PCI bus */ | ||
325 | pe->type = EEH_PE_BUS; | ||
326 | edev->pe = pe; | ||
327 | |||
328 | /* Put the edev to PE */ | ||
329 | list_add_tail(&edev->list, &pe->edevs); | ||
330 | pr_debug("EEH: Add %s to Bus PE#%x\n", | ||
331 | edev->dn->full_name, pe->addr); | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | /* Create a new EEH PE */ | ||
337 | pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE); | ||
338 | if (!pe) { | ||
339 | pr_err("%s: out of memory!\n", __func__); | ||
340 | return -ENOMEM; | ||
341 | } | ||
342 | pe->addr = edev->pe_config_addr; | ||
343 | pe->config_addr = edev->config_addr; | ||
344 | |||
345 | /* | ||
346 | * Put the new EEH PE into hierarchy tree. If the parent | ||
347 | * can't be found, the newly created PE will be attached | ||
348 | * to PHB directly. Otherwise, we have to associate the | ||
349 | * PE with its parent. | ||
350 | */ | ||
351 | parent = eeh_pe_get_parent(edev); | ||
352 | if (!parent) { | ||
353 | parent = eeh_phb_pe_get(edev->phb); | ||
354 | if (!parent) { | ||
355 | pr_err("%s: No PHB PE is found (PHB Domain=%d)\n", | ||
356 | __func__, edev->phb->global_number); | ||
357 | edev->pe = NULL; | ||
358 | kfree(pe); | ||
359 | return -EEXIST; | ||
360 | } | ||
361 | } | ||
362 | pe->parent = parent; | ||
363 | |||
364 | /* | ||
365 | * Put the newly created PE into the child list and | ||
366 | * link the EEH device accordingly. | ||
367 | */ | ||
368 | list_add_tail(&pe->child, &parent->child_list); | ||
369 | list_add_tail(&edev->list, &pe->edevs); | ||
370 | edev->pe = pe; | ||
371 | pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", | ||
372 | edev->dn->full_name, pe->addr, pe->parent->addr); | ||
373 | |||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | /** | ||
378 | * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE | ||
379 | * @edev: EEH device | ||
380 | * | ||
381 | * The PE hierarchy tree might be changed when doing PCI hotplug. | ||
382 | * Also, the PCI devices or buses could be removed from the system | ||
383 | * during EEH recovery. So we have to call the function remove the | ||
384 | * corresponding PE accordingly if necessary. | ||
385 | */ | ||
int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
{
	struct eeh_pe *pe, *parent;

	if (!edev->pe) {
		pr_warning("%s: No PE found for EEH device %s\n",
			__func__, edev->dn->full_name);
		return -EEXIST;
	}

	/* Detach the EEH device from its PE */
	pe = edev->pe;
	edev->pe = NULL;
	list_del(&edev->list);

	/*
	 * Climb the tree pruning PEs that are now empty (no EEH
	 * devices and no child PEs).  PHB PEs are never removed.
	 * The parent pointer must be read before the node may be
	 * freed below.
	 */
	while (1) {
		parent = pe->parent;
		if (pe->type == EEH_PE_PHB)
			break;

		if (list_empty(&pe->edevs) &&
		    list_empty(&pe->child_list)) {
			list_del(&pe->child);
			kfree(pe);
		}

		pe = parent;
	}

	return 0;
}
423 | |||
424 | /** | ||
425 | * __eeh_pe_state_mark - Mark the state for the PE | ||
426 | * @data: EEH PE | ||
427 | * @flag: state | ||
428 | * | ||
429 | * The function is used to mark the indicated state for the given | ||
430 | * PE. Also, the associated PCI devices will be put into IO frozen | ||
431 | * state as well. | ||
432 | */ | ||
433 | static void *__eeh_pe_state_mark(void *data, void *flag) | ||
434 | { | ||
435 | struct eeh_pe *pe = (struct eeh_pe *)data; | ||
436 | int state = *((int *)flag); | ||
437 | struct eeh_dev *tmp; | ||
438 | struct pci_dev *pdev; | ||
439 | |||
440 | /* | ||
441 | * Mark the PE with the indicated state. Also, | ||
442 | * the associated PCI device will be put into | ||
443 | * I/O frozen state to avoid I/O accesses from | ||
444 | * the PCI device driver. | ||
445 | */ | ||
446 | pe->state |= state; | ||
447 | eeh_pe_for_each_dev(pe, tmp) { | ||
448 | pdev = eeh_dev_to_pci_dev(tmp); | ||
449 | if (pdev) | ||
450 | pdev->error_state = pci_channel_io_frozen; | ||
451 | } | ||
452 | |||
453 | return NULL; | ||
454 | } | ||
455 | |||
/**
 * eeh_pe_state_mark - Mark specified state for PE and its associated device
 * @pe: EEH PE
 * @state: state bits to be set
 *
 * EEH error affects the current PE and its child PEs. The function
 * is used to mark appropriate state for the affected PEs and the
 * associated devices.
 */
void eeh_pe_state_mark(struct eeh_pe *pe, int state)
{
	/* Apply the state to @pe and every descendant PE */
	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
}
468 | |||
469 | /** | ||
470 | * __eeh_pe_state_clear - Clear state for the PE | ||
471 | * @data: EEH PE | ||
472 | * @flag: state | ||
473 | * | ||
474 | * The function is used to clear the indicated state from the | ||
475 | * given PE. Besides, we also clear the check count of the PE | ||
476 | * as well. | ||
477 | */ | ||
478 | static void *__eeh_pe_state_clear(void *data, void *flag) | ||
479 | { | ||
480 | struct eeh_pe *pe = (struct eeh_pe *)data; | ||
481 | int state = *((int *)flag); | ||
482 | |||
483 | pe->state &= ~state; | ||
484 | pe->check_count = 0; | ||
485 | |||
486 | return NULL; | ||
487 | } | ||
488 | |||
489 | /** | ||
490 | * eeh_pe_state_clear - Clear state for the PE and its children | ||
491 | * @pe: PE | ||
492 | * @state: state to be cleared | ||
493 | * | ||
494 | * When the PE and its children has been recovered from error, | ||
495 | * we need clear the error state for that. The function is used | ||
496 | * for the purpose. | ||
497 | */ | ||
void eeh_pe_state_clear(struct eeh_pe *pe, int state)
{
	/* Clear the state bits on @pe and every descendant PE */
	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
}
502 | |||
503 | /** | ||
504 | * eeh_restore_one_device_bars - Restore the Base Address Registers for one device | ||
505 | * @data: EEH device | ||
506 | * @flag: Unused | ||
507 | * | ||
508 | * Loads the PCI configuration space base address registers, | ||
509 | * the expansion ROM base address, the latency timer, and etc. | ||
510 | * from the saved values in the device node. | ||
511 | */ | ||
static void *eeh_restore_one_device_bars(void *data, void *flag)
{
	int i;
	u32 cmd;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct device_node *dn = eeh_dev_to_of_node(edev);

	/* Config-space dwords 4..9 (offsets 0x10-0x24): the six BARs */
	for (i = 4; i < 10; i++)
		eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
	/* 12 == Expansion ROM Address */
	eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);

	/* Extract a single byte from the saved dword image; the swap
	 * accounts for the byte order the dwords were stored in. */
#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])

	eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
	eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
		SAVED_BYTE(PCI_LATENCY_TIMER));

	/* max latency, min grant, interrupt pin and line */
	eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);

	/*
	 * Restore PERR & SERR bits, some devices require it,
	 * don't touch the other command bits
	 */
	eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
	if (edev->config_space[1] & PCI_COMMAND_PARITY)
		cmd |= PCI_COMMAND_PARITY;
	else
		cmd &= ~PCI_COMMAND_PARITY;
	if (edev->config_space[1] & PCI_COMMAND_SERR)
		cmd |= PCI_COMMAND_SERR;
	else
		cmd &= ~PCI_COMMAND_SERR;
	eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);

	return NULL;
}
552 | |||
553 | /** | ||
554 | * eeh_pe_restore_bars - Restore the PCI config space info | ||
555 | * @pe: EEH PE | ||
556 | * | ||
557 | * This routine performs a recursive walk to the children | ||
558 | * of this device as well. | ||
559 | */ | ||
void eeh_pe_restore_bars(struct eeh_pe *pe)
{
	/* Visit every device of @pe and its children; the callback
	 * rewrites each device's saved config space. */
	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}
564 | |||
565 | /** | ||
566 | * eeh_pe_bus_get - Retrieve PCI bus according to the given PE | ||
567 | * @pe: EEH PE | ||
568 | * | ||
569 | * Retrieve the PCI bus according to the given PE. Basically, | ||
570 | * there're 3 types of PEs: PHB/Bus/Device. For PHB PE, the | ||
571 | * primary PCI bus will be retrieved. The parent bus will be | ||
572 | * returned for BUS PE. However, we don't have associated PCI | ||
573 | * bus for DEVICE PE. | ||
574 | */ | ||
575 | struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe) | ||
576 | { | ||
577 | struct pci_bus *bus = NULL; | ||
578 | struct eeh_dev *edev; | ||
579 | struct pci_dev *pdev; | ||
580 | |||
581 | if (pe->type == EEH_PE_PHB) { | ||
582 | bus = pe->phb->bus; | ||
583 | } else if (pe->type == EEH_PE_BUS) { | ||
584 | edev = list_first_entry(&pe->edevs, struct eeh_dev, list); | ||
585 | pdev = eeh_dev_to_pci_dev(edev); | ||
586 | if (pdev) | ||
587 | bus = pdev->bus; | ||
588 | } | ||
589 | |||
590 | return bus; | ||
591 | } | ||
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index c33360ec4f4f..19506f935737 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c | |||
@@ -129,27 +129,117 @@ static int pseries_eeh_init(void) | |||
129 | eeh_error_buf_size = RTAS_ERROR_LOG_MAX; | 129 | eeh_error_buf_size = RTAS_ERROR_LOG_MAX; |
130 | } | 130 | } |
131 | 131 | ||
132 | /* Set EEH probe mode */ | ||
133 | eeh_probe_mode_set(EEH_PROBE_MODE_DEVTREE); | ||
134 | |||
132 | return 0; | 135 | return 0; |
133 | } | 136 | } |
134 | 137 | ||
135 | /** | 138 | /** |
139 | * pseries_eeh_of_probe - EEH probe on the given device | ||
140 | * @dn: OF node | ||
141 | * @flag: Unused | ||
142 | * | ||
143 | * When EEH module is installed during system boot, all PCI devices | ||
144 | * are checked one by one to see if it supports EEH. The function | ||
145 | * is introduced for the purpose. | ||
146 | */ | ||
147 | static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) | ||
148 | { | ||
149 | struct eeh_dev *edev; | ||
150 | struct eeh_pe pe; | ||
151 | const u32 *class_code, *vendor_id, *device_id; | ||
152 | const u32 *regs; | ||
153 | int enable = 0; | ||
154 | int ret; | ||
155 | |||
156 | /* Retrieve OF node and eeh device */ | ||
157 | edev = of_node_to_eeh_dev(dn); | ||
158 | if (!of_device_is_available(dn)) | ||
159 | return NULL; | ||
160 | |||
161 | /* Retrieve class/vendor/device IDs */ | ||
162 | class_code = of_get_property(dn, "class-code", NULL); | ||
163 | vendor_id = of_get_property(dn, "vendor-id", NULL); | ||
164 | device_id = of_get_property(dn, "device-id", NULL); | ||
165 | |||
166 | /* Skip for bad OF node or PCI-ISA bridge */ | ||
167 | if (!class_code || !vendor_id || !device_id) | ||
168 | return NULL; | ||
169 | if (dn->type && !strcmp(dn->type, "isa")) | ||
170 | return NULL; | ||
171 | |||
172 | /* Update class code and mode of eeh device */ | ||
173 | edev->class_code = *class_code; | ||
174 | edev->mode = 0; | ||
175 | |||
176 | /* Retrieve the device address */ | ||
177 | regs = of_get_property(dn, "reg", NULL); | ||
178 | if (!regs) { | ||
179 | pr_warning("%s: OF node property %s::reg not found\n", | ||
180 | __func__, dn->full_name); | ||
181 | return NULL; | ||
182 | } | ||
183 | |||
184 | /* Initialize the fake PE */ | ||
185 | memset(&pe, 0, sizeof(struct eeh_pe)); | ||
186 | pe.phb = edev->phb; | ||
187 | pe.config_addr = regs[0]; | ||
188 | |||
189 | /* Enable EEH on the device */ | ||
190 | ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); | ||
191 | if (!ret) { | ||
192 | edev->config_addr = regs[0]; | ||
193 | /* Retrieve PE address */ | ||
194 | edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); | ||
195 | pe.addr = edev->pe_config_addr; | ||
196 | |||
197 | /* Some older systems (Power4) allow the ibm,set-eeh-option | ||
198 | * call to succeed even on nodes where EEH is not supported. | ||
199 | * Verify support explicitly. | ||
200 | */ | ||
201 | ret = eeh_ops->get_state(&pe, NULL); | ||
202 | if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT) | ||
203 | enable = 1; | ||
204 | |||
205 | if (enable) { | ||
206 | eeh_subsystem_enabled = 1; | ||
207 | eeh_add_to_parent_pe(edev); | ||
208 | |||
209 | pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", | ||
210 | __func__, dn->full_name, pe.phb->global_number, | ||
211 | pe.addr, pe.config_addr); | ||
212 | } else if (dn->parent && of_node_to_eeh_dev(dn->parent) && | ||
213 | (of_node_to_eeh_dev(dn->parent))->pe) { | ||
214 | /* This device doesn't support EEH, but it may have an | ||
215 | * EEH parent, in which case we mark it as supported. | ||
216 | */ | ||
217 | edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr; | ||
218 | edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr; | ||
219 | eeh_add_to_parent_pe(edev); | ||
220 | } | ||
221 | } | ||
222 | |||
223 | /* Save memory bars */ | ||
224 | eeh_save_bars(edev); | ||
225 | |||
226 | return NULL; | ||
227 | } | ||
228 | |||
229 | /** | ||
136 | * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable | 230 | * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable |
137 | * @dn: device node | 231 | * @pe: EEH PE |
138 | * @option: operation to be issued | 232 | * @option: operation to be issued |
139 | * | 233 | * |
140 | * The function is used to control the EEH functionality globally. | 234 | * The function is used to control the EEH functionality globally. |
141 | * Currently, following options are support according to PAPR: | 235 | * Currently, following options are support according to PAPR: |
142 | * Enable EEH, Disable EEH, Enable MMIO and Enable DMA | 236 | * Enable EEH, Disable EEH, Enable MMIO and Enable DMA |
143 | */ | 237 | */ |
144 | static int pseries_eeh_set_option(struct device_node *dn, int option) | 238 | static int pseries_eeh_set_option(struct eeh_pe *pe, int option) |
145 | { | 239 | { |
146 | int ret = 0; | 240 | int ret = 0; |
147 | struct eeh_dev *edev; | ||
148 | const u32 *reg; | ||
149 | int config_addr; | 241 | int config_addr; |
150 | 242 | ||
151 | edev = of_node_to_eeh_dev(dn); | ||
152 | |||
153 | /* | 243 | /* |
154 | * When we're enabling or disabling EEH functioality on | 244 | * When we're enabling or disabling EEH functioality on |
155 | * the particular PE, the PE config address is possibly | 245 | * the particular PE, the PE config address is possibly |
@@ -159,15 +249,11 @@ static int pseries_eeh_set_option(struct device_node *dn, int option) | |||
159 | switch (option) { | 249 | switch (option) { |
160 | case EEH_OPT_DISABLE: | 250 | case EEH_OPT_DISABLE: |
161 | case EEH_OPT_ENABLE: | 251 | case EEH_OPT_ENABLE: |
162 | reg = of_get_property(dn, "reg", NULL); | ||
163 | config_addr = reg[0]; | ||
164 | break; | ||
165 | |||
166 | case EEH_OPT_THAW_MMIO: | 252 | case EEH_OPT_THAW_MMIO: |
167 | case EEH_OPT_THAW_DMA: | 253 | case EEH_OPT_THAW_DMA: |
168 | config_addr = edev->config_addr; | 254 | config_addr = pe->config_addr; |
169 | if (edev->pe_config_addr) | 255 | if (pe->addr) |
170 | config_addr = edev->pe_config_addr; | 256 | config_addr = pe->addr; |
171 | break; | 257 | break; |
172 | 258 | ||
173 | default: | 259 | default: |
@@ -177,15 +263,15 @@ static int pseries_eeh_set_option(struct device_node *dn, int option) | |||
177 | } | 263 | } |
178 | 264 | ||
179 | ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, | 265 | ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, |
180 | config_addr, BUID_HI(edev->phb->buid), | 266 | config_addr, BUID_HI(pe->phb->buid), |
181 | BUID_LO(edev->phb->buid), option); | 267 | BUID_LO(pe->phb->buid), option); |
182 | 268 | ||
183 | return ret; | 269 | return ret; |
184 | } | 270 | } |
185 | 271 | ||
186 | /** | 272 | /** |
187 | * pseries_eeh_get_pe_addr - Retrieve PE address | 273 | * pseries_eeh_get_pe_addr - Retrieve PE address |
188 | * @dn: device node | 274 | * @pe: EEH PE |
189 | * | 275 | * |
190 | * Retrieve the assocated PE address. Actually, there're 2 RTAS | 276 | * Retrieve the assocated PE address. Actually, there're 2 RTAS |
191 | * function calls dedicated for the purpose. We need implement | 277 | * function calls dedicated for the purpose. We need implement |
@@ -196,14 +282,11 @@ static int pseries_eeh_set_option(struct device_node *dn, int option) | |||
196 | * It's notable that zero'ed return value means invalid PE config | 282 | * It's notable that zero'ed return value means invalid PE config |
197 | * address. | 283 | * address. |
198 | */ | 284 | */ |
199 | static int pseries_eeh_get_pe_addr(struct device_node *dn) | 285 | static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) |
200 | { | 286 | { |
201 | struct eeh_dev *edev; | ||
202 | int ret = 0; | 287 | int ret = 0; |
203 | int rets[3]; | 288 | int rets[3]; |
204 | 289 | ||
205 | edev = of_node_to_eeh_dev(dn); | ||
206 | |||
207 | if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { | 290 | if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { |
208 | /* | 291 | /* |
209 | * First of all, we need to make sure there has one PE | 292 | * First of all, we need to make sure there has one PE |
@@ -211,18 +294,18 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn) | |||
211 | * meaningless. | 294 | * meaningless. |
212 | */ | 295 | */ |
213 | ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, | 296 | ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, |
214 | edev->config_addr, BUID_HI(edev->phb->buid), | 297 | pe->config_addr, BUID_HI(pe->phb->buid), |
215 | BUID_LO(edev->phb->buid), 1); | 298 | BUID_LO(pe->phb->buid), 1); |
216 | if (ret || (rets[0] == 0)) | 299 | if (ret || (rets[0] == 0)) |
217 | return 0; | 300 | return 0; |
218 | 301 | ||
219 | /* Retrieve the associated PE config address */ | 302 | /* Retrieve the associated PE config address */ |
220 | ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, | 303 | ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, |
221 | edev->config_addr, BUID_HI(edev->phb->buid), | 304 | pe->config_addr, BUID_HI(pe->phb->buid), |
222 | BUID_LO(edev->phb->buid), 0); | 305 | BUID_LO(pe->phb->buid), 0); |
223 | if (ret) { | 306 | if (ret) { |
224 | pr_warning("%s: Failed to get PE address for %s\n", | 307 | pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n", |
225 | __func__, dn->full_name); | 308 | __func__, pe->phb->global_number, pe->config_addr); |
226 | return 0; | 309 | return 0; |
227 | } | 310 | } |
228 | 311 | ||
@@ -231,11 +314,11 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn) | |||
231 | 314 | ||
232 | if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { | 315 | if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { |
233 | ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, | 316 | ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, |
234 | edev->config_addr, BUID_HI(edev->phb->buid), | 317 | pe->config_addr, BUID_HI(pe->phb->buid), |
235 | BUID_LO(edev->phb->buid), 0); | 318 | BUID_LO(pe->phb->buid), 0); |
236 | if (ret) { | 319 | if (ret) { |
237 | pr_warning("%s: Failed to get PE address for %s\n", | 320 | pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n", |
238 | __func__, dn->full_name); | 321 | __func__, pe->phb->global_number, pe->config_addr); |
239 | return 0; | 322 | return 0; |
240 | } | 323 | } |
241 | 324 | ||
@@ -247,7 +330,7 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn) | |||
247 | 330 | ||
248 | /** | 331 | /** |
249 | * pseries_eeh_get_state - Retrieve PE state | 332 | * pseries_eeh_get_state - Retrieve PE state |
250 | * @dn: PE associated device node | 333 | * @pe: EEH PE |
251 | * @state: return value | 334 | * @state: return value |
252 | * | 335 | * |
253 | * Retrieve the state of the specified PE. On RTAS compliant | 336 | * Retrieve the state of the specified PE. On RTAS compliant |
@@ -258,30 +341,28 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn) | |||
258 | * RTAS calls for the purpose, we need to try the new one and back | 341 | * RTAS calls for the purpose, we need to try the new one and back |
259 | * to the old one if the new one couldn't work properly. | 342 | * to the old one if the new one couldn't work properly. |
260 | */ | 343 | */ |
261 | static int pseries_eeh_get_state(struct device_node *dn, int *state) | 344 | static int pseries_eeh_get_state(struct eeh_pe *pe, int *state) |
262 | { | 345 | { |
263 | struct eeh_dev *edev; | ||
264 | int config_addr; | 346 | int config_addr; |
265 | int ret; | 347 | int ret; |
266 | int rets[4]; | 348 | int rets[4]; |
267 | int result; | 349 | int result; |
268 | 350 | ||
269 | /* Figure out PE config address if possible */ | 351 | /* Figure out PE config address if possible */ |
270 | edev = of_node_to_eeh_dev(dn); | 352 | config_addr = pe->config_addr; |
271 | config_addr = edev->config_addr; | 353 | if (pe->addr) |
272 | if (edev->pe_config_addr) | 354 | config_addr = pe->addr; |
273 | config_addr = edev->pe_config_addr; | ||
274 | 355 | ||
275 | if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { | 356 | if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { |
276 | ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets, | 357 | ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets, |
277 | config_addr, BUID_HI(edev->phb->buid), | 358 | config_addr, BUID_HI(pe->phb->buid), |
278 | BUID_LO(edev->phb->buid)); | 359 | BUID_LO(pe->phb->buid)); |
279 | } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) { | 360 | } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) { |
280 | /* Fake PE unavailable info */ | 361 | /* Fake PE unavailable info */ |
281 | rets[2] = 0; | 362 | rets[2] = 0; |
282 | ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets, | 363 | ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets, |
283 | config_addr, BUID_HI(edev->phb->buid), | 364 | config_addr, BUID_HI(pe->phb->buid), |
284 | BUID_LO(edev->phb->buid)); | 365 | BUID_LO(pe->phb->buid)); |
285 | } else { | 366 | } else { |
286 | return EEH_STATE_NOT_SUPPORT; | 367 | return EEH_STATE_NOT_SUPPORT; |
287 | } | 368 | } |
@@ -333,34 +414,32 @@ static int pseries_eeh_get_state(struct device_node *dn, int *state) | |||
333 | 414 | ||
334 | /** | 415 | /** |
335 | * pseries_eeh_reset - Reset the specified PE | 416 | * pseries_eeh_reset - Reset the specified PE |
336 | * @dn: PE associated device node | 417 | * @pe: EEH PE |
337 | * @option: reset option | 418 | * @option: reset option |
338 | * | 419 | * |
339 | * Reset the specified PE | 420 | * Reset the specified PE |
340 | */ | 421 | */ |
341 | static int pseries_eeh_reset(struct device_node *dn, int option) | 422 | static int pseries_eeh_reset(struct eeh_pe *pe, int option) |
342 | { | 423 | { |
343 | struct eeh_dev *edev; | ||
344 | int config_addr; | 424 | int config_addr; |
345 | int ret; | 425 | int ret; |
346 | 426 | ||
347 | /* Figure out PE address */ | 427 | /* Figure out PE address */ |
348 | edev = of_node_to_eeh_dev(dn); | 428 | config_addr = pe->config_addr; |
349 | config_addr = edev->config_addr; | 429 | if (pe->addr) |
350 | if (edev->pe_config_addr) | 430 | config_addr = pe->addr; |
351 | config_addr = edev->pe_config_addr; | ||
352 | 431 | ||
353 | /* Reset PE through RTAS call */ | 432 | /* Reset PE through RTAS call */ |
354 | ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, | 433 | ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, |
355 | config_addr, BUID_HI(edev->phb->buid), | 434 | config_addr, BUID_HI(pe->phb->buid), |
356 | BUID_LO(edev->phb->buid), option); | 435 | BUID_LO(pe->phb->buid), option); |
357 | 436 | ||
358 | /* If fundamental-reset not supported, try hot-reset */ | 437 | /* If fundamental-reset not supported, try hot-reset */ |
359 | if (option == EEH_RESET_FUNDAMENTAL && | 438 | if (option == EEH_RESET_FUNDAMENTAL && |
360 | ret == -8) { | 439 | ret == -8) { |
361 | ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, | 440 | ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, |
362 | config_addr, BUID_HI(edev->phb->buid), | 441 | config_addr, BUID_HI(pe->phb->buid), |
363 | BUID_LO(edev->phb->buid), EEH_RESET_HOT); | 442 | BUID_LO(pe->phb->buid), EEH_RESET_HOT); |
364 | } | 443 | } |
365 | 444 | ||
366 | return ret; | 445 | return ret; |
@@ -368,13 +447,13 @@ static int pseries_eeh_reset(struct device_node *dn, int option) | |||
368 | 447 | ||
369 | /** | 448 | /** |
370 | * pseries_eeh_wait_state - Wait for PE state | 449 | * pseries_eeh_wait_state - Wait for PE state |
371 | * @dn: PE associated device node | 450 | * @pe: EEH PE |
372 | * @max_wait: maximal period in microsecond | 451 | * @max_wait: maximal period in microsecond |
373 | * | 452 | * |
374 | * Wait for the state of associated PE. It might take some time | 453 | * Wait for the state of associated PE. It might take some time |
375 | * to retrieve the PE's state. | 454 | * to retrieve the PE's state. |
376 | */ | 455 | */ |
377 | static int pseries_eeh_wait_state(struct device_node *dn, int max_wait) | 456 | static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait) |
378 | { | 457 | { |
379 | int ret; | 458 | int ret; |
380 | int mwait; | 459 | int mwait; |
@@ -391,7 +470,7 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait) | |||
391 | #define EEH_STATE_MAX_WAIT_TIME (300 * 1000) | 470 | #define EEH_STATE_MAX_WAIT_TIME (300 * 1000) |
392 | 471 | ||
393 | while (1) { | 472 | while (1) { |
394 | ret = pseries_eeh_get_state(dn, &mwait); | 473 | ret = pseries_eeh_get_state(pe, &mwait); |
395 | 474 | ||
396 | /* | 475 | /* |
397 | * If the PE's state is temporarily unavailable, | 476 | * If the PE's state is temporarily unavailable, |
@@ -426,7 +505,7 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait) | |||
426 | 505 | ||
427 | /** | 506 | /** |
428 | * pseries_eeh_get_log - Retrieve error log | 507 | * pseries_eeh_get_log - Retrieve error log |
429 | * @dn: device node | 508 | * @pe: EEH PE |
430 | * @severity: temporary or permanent error log | 509 | * @severity: temporary or permanent error log |
431 | * @drv_log: driver log to be combined with retrieved error log | 510 | * @drv_log: driver log to be combined with retrieved error log |
432 | * @len: length of driver log | 511 | * @len: length of driver log |
@@ -435,24 +514,22 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait) | |||
435 | * Actually, the error will be retrieved through the dedicated | 514 | * Actually, the error will be retrieved through the dedicated |
436 | * RTAS call. | 515 | * RTAS call. |
437 | */ | 516 | */ |
438 | static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len) | 517 | static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len) |
439 | { | 518 | { |
440 | struct eeh_dev *edev; | ||
441 | int config_addr; | 519 | int config_addr; |
442 | unsigned long flags; | 520 | unsigned long flags; |
443 | int ret; | 521 | int ret; |
444 | 522 | ||
445 | edev = of_node_to_eeh_dev(dn); | ||
446 | spin_lock_irqsave(&slot_errbuf_lock, flags); | 523 | spin_lock_irqsave(&slot_errbuf_lock, flags); |
447 | memset(slot_errbuf, 0, eeh_error_buf_size); | 524 | memset(slot_errbuf, 0, eeh_error_buf_size); |
448 | 525 | ||
449 | /* Figure out the PE address */ | 526 | /* Figure out the PE address */ |
450 | config_addr = edev->config_addr; | 527 | config_addr = pe->config_addr; |
451 | if (edev->pe_config_addr) | 528 | if (pe->addr) |
452 | config_addr = edev->pe_config_addr; | 529 | config_addr = pe->addr; |
453 | 530 | ||
454 | ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr, | 531 | ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr, |
455 | BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), | 532 | BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid), |
456 | virt_to_phys(drv_log), len, | 533 | virt_to_phys(drv_log), len, |
457 | virt_to_phys(slot_errbuf), eeh_error_buf_size, | 534 | virt_to_phys(slot_errbuf), eeh_error_buf_size, |
458 | severity); | 535 | severity); |
@@ -465,40 +542,38 @@ static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_l | |||
465 | 542 | ||
466 | /** | 543 | /** |
467 | * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE | 544 | * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE |
468 | * @dn: PE associated device node | 545 | * @pe: EEH PE |
469 | * | 546 | * |
470 | * The function will be called to reconfigure the bridges included | 547 | * The function will be called to reconfigure the bridges included |
471 | * in the specified PE so that the mulfunctional PE would be recovered | 548 | * in the specified PE so that the mulfunctional PE would be recovered |
472 | * again. | 549 | * again. |
473 | */ | 550 | */ |
474 | static int pseries_eeh_configure_bridge(struct device_node *dn) | 551 | static int pseries_eeh_configure_bridge(struct eeh_pe *pe) |
475 | { | 552 | { |
476 | struct eeh_dev *edev; | ||
477 | int config_addr; | 553 | int config_addr; |
478 | int ret; | 554 | int ret; |
479 | 555 | ||
480 | /* Figure out the PE address */ | 556 | /* Figure out the PE address */ |
481 | edev = of_node_to_eeh_dev(dn); | 557 | config_addr = pe->config_addr; |
482 | config_addr = edev->config_addr; | 558 | if (pe->addr) |
483 | if (edev->pe_config_addr) | 559 | config_addr = pe->addr; |
484 | config_addr = edev->pe_config_addr; | ||
485 | 560 | ||
486 | /* Use new configure-pe function, if supported */ | 561 | /* Use new configure-pe function, if supported */ |
487 | if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { | 562 | if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { |
488 | ret = rtas_call(ibm_configure_pe, 3, 1, NULL, | 563 | ret = rtas_call(ibm_configure_pe, 3, 1, NULL, |
489 | config_addr, BUID_HI(edev->phb->buid), | 564 | config_addr, BUID_HI(pe->phb->buid), |
490 | BUID_LO(edev->phb->buid)); | 565 | BUID_LO(pe->phb->buid)); |
491 | } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { | 566 | } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { |
492 | ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, | 567 | ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, |
493 | config_addr, BUID_HI(edev->phb->buid), | 568 | config_addr, BUID_HI(pe->phb->buid), |
494 | BUID_LO(edev->phb->buid)); | 569 | BUID_LO(pe->phb->buid)); |
495 | } else { | 570 | } else { |
496 | return -EFAULT; | 571 | return -EFAULT; |
497 | } | 572 | } |
498 | 573 | ||
499 | if (ret) | 574 | if (ret) |
500 | pr_warning("%s: Unable to configure bridge %d for %s\n", | 575 | pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", |
501 | __func__, ret, dn->full_name); | 576 | __func__, pe->phb->global_number, pe->addr, ret); |
502 | 577 | ||
503 | return ret; | 578 | return ret; |
504 | } | 579 | } |
@@ -542,6 +617,8 @@ static int pseries_eeh_write_config(struct device_node *dn, int where, int size, | |||
542 | static struct eeh_ops pseries_eeh_ops = { | 617 | static struct eeh_ops pseries_eeh_ops = { |
543 | .name = "pseries", | 618 | .name = "pseries", |
544 | .init = pseries_eeh_init, | 619 | .init = pseries_eeh_init, |
620 | .of_probe = pseries_eeh_of_probe, | ||
621 | .dev_probe = NULL, | ||
545 | .set_option = pseries_eeh_set_option, | 622 | .set_option = pseries_eeh_set_option, |
546 | .get_pe_addr = pseries_eeh_get_pe_addr, | 623 | .get_pe_addr = pseries_eeh_get_pe_addr, |
547 | .get_state = pseries_eeh_get_state, | 624 | .get_state = pseries_eeh_get_state, |
@@ -559,7 +636,21 @@ static struct eeh_ops pseries_eeh_ops = { | |||
559 | * EEH initialization on pseries platform. This function should be | 636 | * EEH initialization on pseries platform. This function should be |
560 | * called before any EEH related functions. | 637 | * called before any EEH related functions. |
561 | */ | 638 | */ |
562 | int __init eeh_pseries_init(void) | 639 | static int __init eeh_pseries_init(void) |
563 | { | 640 | { |
564 | return eeh_ops_register(&pseries_eeh_ops); | 641 | int ret = -EINVAL; |
642 | |||
643 | if (!machine_is(pseries)) | ||
644 | return ret; | ||
645 | |||
646 | ret = eeh_ops_register(&pseries_eeh_ops); | ||
647 | if (!ret) | ||
648 | pr_info("EEH: pSeries platform initialized\n"); | ||
649 | else | ||
650 | pr_info("EEH: pSeries platform initialization failure (%d)\n", | ||
651 | ret); | ||
652 | |||
653 | return ret; | ||
565 | } | 654 | } |
655 | |||
656 | early_initcall(eeh_pseries_init); | ||
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c index 243b3510d70f..d37708360f2e 100644 --- a/arch/powerpc/platforms/pseries/eeh_sysfs.c +++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c | |||
@@ -53,9 +53,6 @@ static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL); | |||
53 | EEH_SHOW_ATTR(eeh_mode, mode, "0x%x"); | 53 | EEH_SHOW_ATTR(eeh_mode, mode, "0x%x"); |
54 | EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); | 54 | EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); |
55 | EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); | 55 | EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); |
56 | EEH_SHOW_ATTR(eeh_check_count, check_count, "%d" ); | ||
57 | EEH_SHOW_ATTR(eeh_freeze_count, freeze_count, "%d" ); | ||
58 | EEH_SHOW_ATTR(eeh_false_positives, false_positives, "%d" ); | ||
59 | 56 | ||
60 | void eeh_sysfs_add_device(struct pci_dev *pdev) | 57 | void eeh_sysfs_add_device(struct pci_dev *pdev) |
61 | { | 58 | { |
@@ -64,9 +61,6 @@ void eeh_sysfs_add_device(struct pci_dev *pdev) | |||
64 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); | 61 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); |
65 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); | 62 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); |
66 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); | 63 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); |
67 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count); | ||
68 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives); | ||
69 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count); | ||
70 | 64 | ||
71 | if (rc) | 65 | if (rc) |
72 | printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); | 66 | printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); |
@@ -77,8 +71,5 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev) | |||
77 | device_remove_file(&pdev->dev, &dev_attr_eeh_mode); | 71 | device_remove_file(&pdev->dev, &dev_attr_eeh_mode); |
78 | device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); | 72 | device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); |
79 | device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); | 73 | device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); |
80 | device_remove_file(&pdev->dev, &dev_attr_eeh_check_count); | ||
81 | device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives); | ||
82 | device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count); | ||
83 | } | 74 | } |
84 | 75 | ||
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index bca220f2873c..6153eea27ce7 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/mm.h> | 30 | #include <linux/mm.h> |
31 | #include <linux/memblock.h> | ||
31 | #include <linux/spinlock.h> | 32 | #include <linux/spinlock.h> |
32 | #include <linux/sched.h> /* for show_stack */ | 33 | #include <linux/sched.h> /* for show_stack */ |
33 | #include <linux/string.h> | 34 | #include <linux/string.h> |
@@ -41,7 +42,6 @@ | |||
41 | #include <asm/iommu.h> | 42 | #include <asm/iommu.h> |
42 | #include <asm/pci-bridge.h> | 43 | #include <asm/pci-bridge.h> |
43 | #include <asm/machdep.h> | 44 | #include <asm/machdep.h> |
44 | #include <asm/abs_addr.h> | ||
45 | #include <asm/pSeries_reconfig.h> | 45 | #include <asm/pSeries_reconfig.h> |
46 | #include <asm/firmware.h> | 46 | #include <asm/firmware.h> |
47 | #include <asm/tce.h> | 47 | #include <asm/tce.h> |
@@ -99,7 +99,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, | |||
99 | 99 | ||
100 | while (npages--) { | 100 | while (npages--) { |
101 | /* can't move this out since we might cross MEMBLOCK boundary */ | 101 | /* can't move this out since we might cross MEMBLOCK boundary */ |
102 | rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; | 102 | rpn = __pa(uaddr) >> TCE_SHIFT; |
103 | *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; | 103 | *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; |
104 | 104 | ||
105 | uaddr += TCE_PAGE_SIZE; | 105 | uaddr += TCE_PAGE_SIZE; |
@@ -148,7 +148,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, | |||
148 | int ret = 0; | 148 | int ret = 0; |
149 | long tcenum_start = tcenum, npages_start = npages; | 149 | long tcenum_start = tcenum, npages_start = npages; |
150 | 150 | ||
151 | rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; | 151 | rpn = __pa(uaddr) >> TCE_SHIFT; |
152 | proto_tce = TCE_PCI_READ; | 152 | proto_tce = TCE_PCI_READ; |
153 | if (direction != DMA_TO_DEVICE) | 153 | if (direction != DMA_TO_DEVICE) |
154 | proto_tce |= TCE_PCI_WRITE; | 154 | proto_tce |= TCE_PCI_WRITE; |
@@ -217,7 +217,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | |||
217 | __get_cpu_var(tce_page) = tcep; | 217 | __get_cpu_var(tce_page) = tcep; |
218 | } | 218 | } |
219 | 219 | ||
220 | rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; | 220 | rpn = __pa(uaddr) >> TCE_SHIFT; |
221 | proto_tce = TCE_PCI_READ; | 221 | proto_tce = TCE_PCI_READ; |
222 | if (direction != DMA_TO_DEVICE) | 222 | if (direction != DMA_TO_DEVICE) |
223 | proto_tce |= TCE_PCI_WRITE; | 223 | proto_tce |= TCE_PCI_WRITE; |
@@ -237,7 +237,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | |||
237 | 237 | ||
238 | rc = plpar_tce_put_indirect((u64)tbl->it_index, | 238 | rc = plpar_tce_put_indirect((u64)tbl->it_index, |
239 | (u64)tcenum << 12, | 239 | (u64)tcenum << 12, |
240 | (u64)virt_to_abs(tcep), | 240 | (u64)__pa(tcep), |
241 | limit); | 241 | limit); |
242 | 242 | ||
243 | npages -= limit; | 243 | npages -= limit; |
@@ -441,7 +441,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, | |||
441 | 441 | ||
442 | rc = plpar_tce_put_indirect(liobn, | 442 | rc = plpar_tce_put_indirect(liobn, |
443 | dma_offset, | 443 | dma_offset, |
444 | (u64)virt_to_abs(tcep), | 444 | (u64)__pa(tcep), |
445 | limit); | 445 | limit); |
446 | 446 | ||
447 | num_tce -= limit; | 447 | num_tce -= limit; |
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 5f3ef876ded2..177055d0186b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <asm/page.h> | 31 | #include <asm/page.h> |
32 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
33 | #include <asm/machdep.h> | 33 | #include <asm/machdep.h> |
34 | #include <asm/abs_addr.h> | ||
35 | #include <asm/mmu_context.h> | 34 | #include <asm/mmu_context.h> |
36 | #include <asm/iommu.h> | 35 | #include <asm/iommu.h> |
37 | #include <asm/tlbflush.h> | 36 | #include <asm/tlbflush.h> |
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 109fdb75578d..d19f4977c834 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -210,6 +210,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | |||
210 | static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) | 210 | static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) |
211 | { | 211 | { |
212 | struct device_node *dn; | 212 | struct device_node *dn; |
213 | struct eeh_dev *edev; | ||
213 | 214 | ||
214 | /* Found our PE and assume 8 at that point. */ | 215 | /* Found our PE and assume 8 at that point. */ |
215 | 216 | ||
@@ -217,7 +218,10 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) | |||
217 | if (!dn) | 218 | if (!dn) |
218 | return NULL; | 219 | return NULL; |
219 | 220 | ||
220 | dn = eeh_find_device_pe(dn); | 221 | /* Get the top level device in the PE */ |
222 | edev = of_node_to_eeh_dev(dn); | ||
223 | edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); | ||
224 | dn = eeh_dev_to_of_node(edev); | ||
221 | if (!dn) | 225 | if (!dn) |
222 | return NULL; | 226 | return NULL; |
223 | 227 | ||
@@ -387,12 +391,13 @@ static int check_msix_entries(struct pci_dev *pdev) | |||
387 | return 0; | 391 | return 0; |
388 | } | 392 | } |
389 | 393 | ||
390 | static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | 394 | static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) |
391 | { | 395 | { |
392 | struct pci_dn *pdn; | 396 | struct pci_dn *pdn; |
393 | int hwirq, virq, i, rc; | 397 | int hwirq, virq, i, rc; |
394 | struct msi_desc *entry; | 398 | struct msi_desc *entry; |
395 | struct msi_msg msg; | 399 | struct msi_msg msg; |
400 | int nvec = nvec_in; | ||
396 | 401 | ||
397 | pdn = get_pdn(pdev); | 402 | pdn = get_pdn(pdev); |
398 | if (!pdn) | 403 | if (!pdn) |
@@ -402,10 +407,23 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
402 | return -EINVAL; | 407 | return -EINVAL; |
403 | 408 | ||
404 | /* | 409 | /* |
410 | * Firmware currently refuse any non power of two allocation | ||
411 | * so we round up if the quota will allow it. | ||
412 | */ | ||
413 | if (type == PCI_CAP_ID_MSIX) { | ||
414 | int m = roundup_pow_of_two(nvec); | ||
415 | int quota = msi_quota_for_device(pdev, m); | ||
416 | |||
417 | if (quota >= m) | ||
418 | nvec = m; | ||
419 | } | ||
420 | |||
421 | /* | ||
405 | * Try the new more explicit firmware interface, if that fails fall | 422 | * Try the new more explicit firmware interface, if that fails fall |
406 | * back to the old interface. The old interface is known to never | 423 | * back to the old interface. The old interface is known to never |
407 | * return MSI-Xs. | 424 | * return MSI-Xs. |
408 | */ | 425 | */ |
426 | again: | ||
409 | if (type == PCI_CAP_ID_MSI) { | 427 | if (type == PCI_CAP_ID_MSI) { |
410 | rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); | 428 | rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); |
411 | 429 | ||
@@ -417,6 +435,10 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
417 | rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); | 435 | rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); |
418 | 436 | ||
419 | if (rc != nvec) { | 437 | if (rc != nvec) { |
438 | if (nvec != nvec_in) { | ||
439 | nvec = nvec_in; | ||
440 | goto again; | ||
441 | } | ||
420 | pr_debug("rtas_msi: rtas_change_msi() failed\n"); | 442 | pr_debug("rtas_msi: rtas_change_msi() failed\n"); |
421 | return rc; | 443 | return rc; |
422 | } | 444 | } |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 2c6ded29f73d..56b864d777ee 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
@@ -73,7 +73,7 @@ void __init pSeries_final_fixup(void) | |||
73 | { | 73 | { |
74 | pSeries_request_regions(); | 74 | pSeries_request_regions(); |
75 | 75 | ||
76 | pci_addr_cache_build(); | 76 | eeh_addr_cache_build(); |
77 | } | 77 | } |
78 | 78 | ||
79 | /* | 79 | /* |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 51ecac920dd8..e3cb7ae61658 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -388,10 +388,8 @@ static void __init pSeries_setup_arch(void) | |||
388 | 388 | ||
389 | /* Find and initialize PCI host bridges */ | 389 | /* Find and initialize PCI host bridges */ |
390 | init_pci_config_tokens(); | 390 | init_pci_config_tokens(); |
391 | eeh_pseries_init(); | ||
392 | find_and_init_phbs(); | 391 | find_and_init_phbs(); |
393 | pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); | 392 | pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); |
394 | eeh_init(); | ||
395 | 393 | ||
396 | pSeries_nvram_init(); | 394 | pSeries_nvram_init(); |
397 | 395 | ||
@@ -416,16 +414,20 @@ static int __init pSeries_init_panel(void) | |||
416 | } | 414 | } |
417 | machine_arch_initcall(pseries, pSeries_init_panel); | 415 | machine_arch_initcall(pseries, pSeries_init_panel); |
418 | 416 | ||
419 | static int pseries_set_dabr(unsigned long dabr) | 417 | static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx) |
420 | { | 418 | { |
421 | return plpar_hcall_norets(H_SET_DABR, dabr); | 419 | return plpar_hcall_norets(H_SET_DABR, dabr); |
422 | } | 420 | } |
423 | 421 | ||
424 | static int pseries_set_xdabr(unsigned long dabr) | 422 | static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx) |
425 | { | 423 | { |
426 | /* We want to catch accesses from kernel and userspace */ | 424 | /* Have to set at least one bit in the DABRX according to PAPR */ |
427 | return plpar_hcall_norets(H_SET_XDABR, dabr, | 425 | if (dabrx == 0 && dabr == 0) |
428 | H_DABRX_KERNEL | H_DABRX_USER); | 426 | dabrx = DABRX_USER; |
427 | /* PAPR says we can only set kernel and user bits */ | ||
428 | dabrx &= DABRX_KERNEL | DABRX_USER; | ||
429 | |||
430 | return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx); | ||
429 | } | 431 | } |
430 | 432 | ||
431 | #define CMO_CHARACTERISTICS_TOKEN 44 | 433 | #define CMO_CHARACTERISTICS_TOKEN 44 |
@@ -529,10 +531,10 @@ static void __init pSeries_init_early(void) | |||
529 | if (firmware_has_feature(FW_FEATURE_LPAR)) | 531 | if (firmware_has_feature(FW_FEATURE_LPAR)) |
530 | hvc_vio_init_early(); | 532 | hvc_vio_init_early(); |
531 | #endif | 533 | #endif |
532 | if (firmware_has_feature(FW_FEATURE_DABR)) | 534 | if (firmware_has_feature(FW_FEATURE_XDABR)) |
533 | ppc_md.set_dabr = pseries_set_dabr; | ||
534 | else if (firmware_has_feature(FW_FEATURE_XDABR)) | ||
535 | ppc_md.set_dabr = pseries_set_xdabr; | 535 | ppc_md.set_dabr = pseries_set_xdabr; |
536 | else if (firmware_has_feature(FW_FEATURE_DABR)) | ||
537 | ppc_md.set_dabr = pseries_set_dabr; | ||
536 | 538 | ||
537 | pSeries_cmo_feature_init(); | 539 | pSeries_cmo_feature_init(); |
538 | iommu_init_early_pSeries(); | 540 | iommu_init_early_pSeries(); |
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 4f2680f431b5..8ef63a01e345 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include <asm/iommu.h> | 43 | #include <asm/iommu.h> |
44 | #include <asm/pci-bridge.h> | 44 | #include <asm/pci-bridge.h> |
45 | #include <asm/machdep.h> | 45 | #include <asm/machdep.h> |
46 | #include <asm/abs_addr.h> | ||
47 | #include <asm/cacheflush.h> | 46 | #include <asm/cacheflush.h> |
48 | #include <asm/ppc-pci.h> | 47 | #include <asm/ppc-pci.h> |
49 | 48 | ||
@@ -167,7 +166,7 @@ static int dart_build(struct iommu_table *tbl, long index, | |||
167 | */ | 166 | */ |
168 | l = npages; | 167 | l = npages; |
169 | while (l--) { | 168 | while (l--) { |
170 | rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT; | 169 | rpn = __pa(uaddr) >> DART_PAGE_SHIFT; |
171 | 170 | ||
172 | *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); | 171 | *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); |
173 | 172 | ||
@@ -244,7 +243,7 @@ static int __init dart_init(struct device_node *dart_node) | |||
244 | panic("DART: Cannot map registers!"); | 243 | panic("DART: Cannot map registers!"); |
245 | 244 | ||
246 | /* Map in DART table */ | 245 | /* Map in DART table */ |
247 | dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); | 246 | dart_vbase = ioremap(__pa(dart_tablebase), dart_tablesize); |
248 | 247 | ||
249 | /* Fill initial table */ | 248 | /* Fill initial table */ |
250 | for (i = 0; i < dart_tablesize/4; i++) | 249 | for (i = 0; i < dart_tablesize/4; i++) |
@@ -463,7 +462,7 @@ void __init alloc_dart_table(void) | |||
463 | * will blow up an entire large page anyway in the kernel mapping | 462 | * will blow up an entire large page anyway in the kernel mapping |
464 | */ | 463 | */ |
465 | dart_tablebase = (unsigned long) | 464 | dart_tablebase = (unsigned long) |
466 | abs_to_virt(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); | 465 | __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); |
467 | 466 | ||
468 | printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); | 467 | printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); |
469 | } | 468 | } |
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 9b49c65ee7a4..987f441525cb 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
@@ -740,7 +740,7 @@ static void insert_bpts(void) | |||
740 | static void insert_cpu_bpts(void) | 740 | static void insert_cpu_bpts(void) |
741 | { | 741 | { |
742 | if (dabr.enabled) | 742 | if (dabr.enabled) |
743 | set_dabr(dabr.address | (dabr.enabled & 7)); | 743 | set_dabr(dabr.address | (dabr.enabled & 7), DABRX_ALL); |
744 | if (iabr && cpu_has_feature(CPU_FTR_IABR)) | 744 | if (iabr && cpu_has_feature(CPU_FTR_IABR)) |
745 | mtspr(SPRN_IABR, iabr->address | 745 | mtspr(SPRN_IABR, iabr->address |
746 | | (iabr->enabled & (BP_IABR|BP_IABR_TE))); | 746 | | (iabr->enabled & (BP_IABR|BP_IABR_TE))); |
@@ -768,7 +768,7 @@ static void remove_bpts(void) | |||
768 | 768 | ||
769 | static void remove_cpu_bpts(void) | 769 | static void remove_cpu_bpts(void) |
770 | { | 770 | { |
771 | set_dabr(0); | 771 | set_dabr(0, 0); |
772 | if (cpu_has_feature(CPU_FTR_IABR)) | 772 | if (cpu_has_feature(CPU_FTR_IABR)) |
773 | mtspr(SPRN_IABR, 0); | 773 | mtspr(SPRN_IABR, 0); |
774 | } | 774 | } |