diff options
Diffstat (limited to 'arch/powerpc/kernel')
33 files changed, 1195 insertions, 787 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 93e1465c8496..f5995a912213 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -248,6 +248,7 @@ int main(void) | |||
248 | #endif | 248 | #endif |
249 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | 249 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); |
250 | DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); | 250 | DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); |
251 | DEFINE(PACA_DSCR, offsetof(struct paca_struct, dscr_default)); | ||
251 | DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime)); | 252 | DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime)); |
252 | DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user)); | 253 | DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user)); |
253 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); | 254 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); |
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index cc2d8962e090..4f1393d20079 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S | |||
@@ -94,12 +94,12 @@ _GLOBAL(setup_altivec_idle) | |||
94 | _GLOBAL(__setup_cpu_e6500) | 94 | _GLOBAL(__setup_cpu_e6500) |
95 | mflr r6 | 95 | mflr r6 |
96 | #ifdef CONFIG_PPC64 | 96 | #ifdef CONFIG_PPC64 |
97 | bl .setup_altivec_ivors | 97 | bl setup_altivec_ivors |
98 | /* Touch IVOR42 only if the CPU supports E.HV category */ | 98 | /* Touch IVOR42 only if the CPU supports E.HV category */ |
99 | mfspr r10,SPRN_MMUCFG | 99 | mfspr r10,SPRN_MMUCFG |
100 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE | 100 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE |
101 | beq 1f | 101 | beq 1f |
102 | bl .setup_lrat_ivor | 102 | bl setup_lrat_ivor |
103 | 1: | 103 | 1: |
104 | #endif | 104 | #endif |
105 | bl setup_pw20_idle | 105 | bl setup_pw20_idle |
@@ -164,15 +164,15 @@ _GLOBAL(__setup_cpu_e5500) | |||
164 | #ifdef CONFIG_PPC_BOOK3E_64 | 164 | #ifdef CONFIG_PPC_BOOK3E_64 |
165 | _GLOBAL(__restore_cpu_e6500) | 165 | _GLOBAL(__restore_cpu_e6500) |
166 | mflr r5 | 166 | mflr r5 |
167 | bl .setup_altivec_ivors | 167 | bl setup_altivec_ivors |
168 | /* Touch IVOR42 only if the CPU supports E.HV category */ | 168 | /* Touch IVOR42 only if the CPU supports E.HV category */ |
169 | mfspr r10,SPRN_MMUCFG | 169 | mfspr r10,SPRN_MMUCFG |
170 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE | 170 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE |
171 | beq 1f | 171 | beq 1f |
172 | bl .setup_lrat_ivor | 172 | bl setup_lrat_ivor |
173 | 1: | 173 | 1: |
174 | bl .setup_pw20_idle | 174 | bl setup_pw20_idle |
175 | bl .setup_altivec_idle | 175 | bl setup_altivec_idle |
176 | bl __restore_cpu_e5500 | 176 | bl __restore_cpu_e5500 |
177 | mtlr r5 | 177 | mtlr r5 |
178 | blr | 178 | blr |
@@ -181,9 +181,9 @@ _GLOBAL(__restore_cpu_e5500) | |||
181 | mflr r4 | 181 | mflr r4 |
182 | bl __e500_icache_setup | 182 | bl __e500_icache_setup |
183 | bl __e500_dcache_setup | 183 | bl __e500_dcache_setup |
184 | bl .__setup_base_ivors | 184 | bl __setup_base_ivors |
185 | bl .setup_perfmon_ivor | 185 | bl setup_perfmon_ivor |
186 | bl .setup_doorbell_ivors | 186 | bl setup_doorbell_ivors |
187 | /* | 187 | /* |
188 | * We only want to touch IVOR38-41 if we're running on hardware | 188 | * We only want to touch IVOR38-41 if we're running on hardware |
189 | * that supports category E.HV. The architectural way to determine | 189 | * that supports category E.HV. The architectural way to determine |
@@ -192,7 +192,7 @@ _GLOBAL(__restore_cpu_e5500) | |||
192 | mfspr r10,SPRN_MMUCFG | 192 | mfspr r10,SPRN_MMUCFG |
193 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE | 193 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE |
194 | beq 1f | 194 | beq 1f |
195 | bl .setup_ehv_ivors | 195 | bl setup_ehv_ivors |
196 | 1: | 196 | 1: |
197 | mtlr r4 | 197 | mtlr r4 |
198 | blr | 198 | blr |
@@ -201,9 +201,9 @@ _GLOBAL(__setup_cpu_e5500) | |||
201 | mflr r5 | 201 | mflr r5 |
202 | bl __e500_icache_setup | 202 | bl __e500_icache_setup |
203 | bl __e500_dcache_setup | 203 | bl __e500_dcache_setup |
204 | bl .__setup_base_ivors | 204 | bl __setup_base_ivors |
205 | bl .setup_perfmon_ivor | 205 | bl setup_perfmon_ivor |
206 | bl .setup_doorbell_ivors | 206 | bl setup_doorbell_ivors |
207 | /* | 207 | /* |
208 | * We only want to touch IVOR38-41 if we're running on hardware | 208 | * We only want to touch IVOR38-41 if we're running on hardware |
209 | * that supports category E.HV. The architectural way to determine | 209 | * that supports category E.HV. The architectural way to determine |
@@ -212,7 +212,7 @@ _GLOBAL(__setup_cpu_e5500) | |||
212 | mfspr r10,SPRN_MMUCFG | 212 | mfspr r10,SPRN_MMUCFG |
213 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE | 213 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE |
214 | beq 1f | 214 | beq 1f |
215 | bl .setup_ehv_ivors | 215 | bl setup_ehv_ivors |
216 | b 2f | 216 | b 2f |
217 | 1: | 217 | 1: |
218 | ld r10,CPU_SPEC_FEATURES(r4) | 218 | ld r10,CPU_SPEC_FEATURES(r4) |
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index e7b76a6bf150..7051ea3101b9 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -22,6 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/debugfs.h> | ||
25 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
26 | #include <linux/init.h> | 27 | #include <linux/init.h> |
27 | #include <linux/list.h> | 28 | #include <linux/list.h> |
@@ -35,6 +36,7 @@ | |||
35 | #include <linux/of.h> | 36 | #include <linux/of.h> |
36 | 37 | ||
37 | #include <linux/atomic.h> | 38 | #include <linux/atomic.h> |
39 | #include <asm/debug.h> | ||
38 | #include <asm/eeh.h> | 40 | #include <asm/eeh.h> |
39 | #include <asm/eeh_event.h> | 41 | #include <asm/eeh_event.h> |
40 | #include <asm/io.h> | 42 | #include <asm/io.h> |
@@ -87,22 +89,21 @@ | |||
87 | /* Time to wait for a PCI slot to report status, in milliseconds */ | 89 | /* Time to wait for a PCI slot to report status, in milliseconds */ |
88 | #define PCI_BUS_RESET_WAIT_MSEC (5*60*1000) | 90 | #define PCI_BUS_RESET_WAIT_MSEC (5*60*1000) |
89 | 91 | ||
90 | /* Platform dependent EEH operations */ | ||
91 | struct eeh_ops *eeh_ops = NULL; | ||
92 | |||
93 | bool eeh_subsystem_enabled = false; | ||
94 | EXPORT_SYMBOL(eeh_subsystem_enabled); | ||
95 | |||
96 | /* | 92 | /* |
97 | * EEH probe mode support. The intention is to support multiple | 93 | * EEH probe mode support, which is part of the flags, |
98 | * platforms for EEH. Some platforms like pSeries do PCI emulation | 94 | * is to support multiple platforms for EEH. Some platforms |
99 | * based on device tree. However, other platforms like powernv probe | 95 | * like pSeries do PCI emulation based on device tree. |
100 | * PCI devices from hardware. The flag is used to distinguish that. | 96 | * However, other platforms like powernv probe PCI devices |
101 | * In addition, struct eeh_ops::probe would be invoked for particular | 97 | * from hardware. The flag is used to distinguish that. |
102 | * OF node or PCI device so that the corresponding PE would be created | 98 | * In addition, struct eeh_ops::probe would be invoked for |
103 | * there. | 99 | * particular OF node or PCI device so that the corresponding |
100 | * PE would be created there. | ||
104 | */ | 101 | */ |
105 | int eeh_probe_mode; | 102 | int eeh_subsystem_flags; |
103 | EXPORT_SYMBOL(eeh_subsystem_flags); | ||
104 | |||
105 | /* Platform dependent EEH operations */ | ||
106 | struct eeh_ops *eeh_ops = NULL; | ||
106 | 107 | ||
107 | /* Lock to avoid races due to multiple reports of an error */ | 108 | /* Lock to avoid races due to multiple reports of an error */ |
108 | DEFINE_RAW_SPINLOCK(confirm_error_lock); | 109 | DEFINE_RAW_SPINLOCK(confirm_error_lock); |
@@ -133,6 +134,15 @@ static struct eeh_stats eeh_stats; | |||
133 | 134 | ||
134 | #define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE) | 135 | #define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE) |
135 | 136 | ||
137 | static int __init eeh_setup(char *str) | ||
138 | { | ||
139 | if (!strcmp(str, "off")) | ||
140 | eeh_subsystem_flags |= EEH_FORCE_DISABLED; | ||
141 | |||
142 | return 1; | ||
143 | } | ||
144 | __setup("eeh=", eeh_setup); | ||
145 | |||
136 | /** | 146 | /** |
137 | * eeh_gather_pci_data - Copy assorted PCI config space registers to buff | 147 | * eeh_gather_pci_data - Copy assorted PCI config space registers to buff |
138 | * @edev: device to report data for | 148 | * @edev: device to report data for |
@@ -145,73 +155,67 @@ static struct eeh_stats eeh_stats; | |||
145 | static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len) | 155 | static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len) |
146 | { | 156 | { |
147 | struct device_node *dn = eeh_dev_to_of_node(edev); | 157 | struct device_node *dn = eeh_dev_to_of_node(edev); |
148 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); | ||
149 | u32 cfg; | 158 | u32 cfg; |
150 | int cap, i; | 159 | int cap, i; |
151 | int n = 0; | 160 | int n = 0; |
152 | 161 | ||
153 | n += scnprintf(buf+n, len-n, "%s\n", dn->full_name); | 162 | n += scnprintf(buf+n, len-n, "%s\n", dn->full_name); |
154 | printk(KERN_WARNING "EEH: of node=%s\n", dn->full_name); | 163 | pr_warn("EEH: of node=%s\n", dn->full_name); |
155 | 164 | ||
156 | eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg); | 165 | eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg); |
157 | n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); | 166 | n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); |
158 | printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg); | 167 | pr_warn("EEH: PCI device/vendor: %08x\n", cfg); |
159 | 168 | ||
160 | eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg); | 169 | eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg); |
161 | n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); | 170 | n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); |
162 | printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg); | 171 | pr_warn("EEH: PCI cmd/status register: %08x\n", cfg); |
163 | |||
164 | if (!dev) { | ||
165 | printk(KERN_WARNING "EEH: no PCI device for this of node\n"); | ||
166 | return n; | ||
167 | } | ||
168 | 172 | ||
169 | /* Gather bridge-specific registers */ | 173 | /* Gather bridge-specific registers */ |
170 | if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) { | 174 | if (edev->mode & EEH_DEV_BRIDGE) { |
171 | eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg); | 175 | eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg); |
172 | n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); | 176 | n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); |
173 | printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg); | 177 | pr_warn("EEH: Bridge secondary status: %04x\n", cfg); |
174 | 178 | ||
175 | eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg); | 179 | eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg); |
176 | n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); | 180 | n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); |
177 | printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg); | 181 | pr_warn("EEH: Bridge control: %04x\n", cfg); |
178 | } | 182 | } |
179 | 183 | ||
180 | /* Dump out the PCI-X command and status regs */ | 184 | /* Dump out the PCI-X command and status regs */ |
181 | cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); | 185 | cap = edev->pcix_cap; |
182 | if (cap) { | 186 | if (cap) { |
183 | eeh_ops->read_config(dn, cap, 4, &cfg); | 187 | eeh_ops->read_config(dn, cap, 4, &cfg); |
184 | n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); | 188 | n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); |
185 | printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg); | 189 | pr_warn("EEH: PCI-X cmd: %08x\n", cfg); |
186 | 190 | ||
187 | eeh_ops->read_config(dn, cap+4, 4, &cfg); | 191 | eeh_ops->read_config(dn, cap+4, 4, &cfg); |
188 | n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); | 192 | n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); |
189 | printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg); | 193 | pr_warn("EEH: PCI-X status: %08x\n", cfg); |
190 | } | 194 | } |
191 | 195 | ||
192 | /* If PCI-E capable, dump PCI-E cap 10, and the AER */ | 196 | /* If PCI-E capable, dump PCI-E cap 10 */ |
193 | if (pci_is_pcie(dev)) { | 197 | cap = edev->pcie_cap; |
198 | if (cap) { | ||
194 | n += scnprintf(buf+n, len-n, "pci-e cap10:\n"); | 199 | n += scnprintf(buf+n, len-n, "pci-e cap10:\n"); |
195 | printk(KERN_WARNING | 200 | pr_warn("EEH: PCI-E capabilities and status follow:\n"); |
196 | "EEH: PCI-E capabilities and status follow:\n"); | ||
197 | 201 | ||
198 | for (i=0; i<=8; i++) { | 202 | for (i=0; i<=8; i++) { |
199 | eeh_ops->read_config(dn, dev->pcie_cap+4*i, 4, &cfg); | 203 | eeh_ops->read_config(dn, cap+4*i, 4, &cfg); |
200 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); | 204 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); |
201 | printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg); | 205 | pr_warn("EEH: PCI-E %02x: %08x\n", i, cfg); |
202 | } | 206 | } |
207 | } | ||
203 | 208 | ||
204 | cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 209 | /* If AER capable, dump it */ |
205 | if (cap) { | 210 | cap = edev->aer_cap; |
206 | n += scnprintf(buf+n, len-n, "pci-e AER:\n"); | 211 | if (cap) { |
207 | printk(KERN_WARNING | 212 | n += scnprintf(buf+n, len-n, "pci-e AER:\n"); |
208 | "EEH: PCI-E AER capability register set follows:\n"); | 213 | pr_warn("EEH: PCI-E AER capability register set follows:\n"); |
209 | 214 | ||
210 | for (i=0; i<14; i++) { | 215 | for (i=0; i<14; i++) { |
211 | eeh_ops->read_config(dn, cap+4*i, 4, &cfg); | 216 | eeh_ops->read_config(dn, cap+4*i, 4, &cfg); |
212 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); | 217 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); |
213 | printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg); | 218 | pr_warn("EEH: PCI-E AER %02x: %08x\n", i, cfg); |
214 | } | ||
215 | } | 219 | } |
216 | } | 220 | } |
217 | 221 | ||
@@ -232,21 +236,19 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) | |||
232 | { | 236 | { |
233 | size_t loglen = 0; | 237 | size_t loglen = 0; |
234 | struct eeh_dev *edev, *tmp; | 238 | struct eeh_dev *edev, *tmp; |
235 | bool valid_cfg_log = true; | ||
236 | 239 | ||
237 | /* | 240 | /* |
238 | * When the PHB is fenced or dead, it's pointless to collect | 241 | * When the PHB is fenced or dead, it's pointless to collect |
239 | * the data from PCI config space because it should return | 242 | * the data from PCI config space because it should return |
240 | * 0xFF's. For ER, we still retrieve the data from the PCI | 243 | * 0xFF's. For ER, we still retrieve the data from the PCI |
241 | * config space. | 244 | * config space. |
245 | * | ||
246 | * For pHyp, we have to enable IO for log retrieval. Otherwise, | ||
247 | * 0xFF's is always returned from PCI config space. | ||
242 | */ | 248 | */ |
243 | if (eeh_probe_mode_dev() && | 249 | if (!(pe->type & EEH_PE_PHB)) { |
244 | (pe->type & EEH_PE_PHB) && | 250 | if (eeh_probe_mode_devtree()) |
245 | (pe->state & (EEH_PE_ISOLATED | EEH_PE_PHB_DEAD))) | 251 | eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); |
246 | valid_cfg_log = false; | ||
247 | |||
248 | if (valid_cfg_log) { | ||
249 | eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); | ||
250 | eeh_ops->configure_bridge(pe); | 252 | eeh_ops->configure_bridge(pe); |
251 | eeh_pe_restore_bars(pe); | 253 | eeh_pe_restore_bars(pe); |
252 | 254 | ||
@@ -309,7 +311,7 @@ static int eeh_phb_check_failure(struct eeh_pe *pe) | |||
309 | 311 | ||
310 | /* If the PHB has been in problematic state */ | 312 | /* If the PHB has been in problematic state */ |
311 | eeh_serialize_lock(&flags); | 313 | eeh_serialize_lock(&flags); |
312 | if (phb_pe->state & (EEH_PE_ISOLATED | EEH_PE_PHB_DEAD)) { | 314 | if (phb_pe->state & EEH_PE_ISOLATED) { |
313 | ret = 0; | 315 | ret = 0; |
314 | goto out; | 316 | goto out; |
315 | } | 317 | } |
@@ -515,16 +517,42 @@ EXPORT_SYMBOL(eeh_check_failure); | |||
515 | */ | 517 | */ |
516 | int eeh_pci_enable(struct eeh_pe *pe, int function) | 518 | int eeh_pci_enable(struct eeh_pe *pe, int function) |
517 | { | 519 | { |
518 | int rc; | 520 | int rc, flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); |
521 | |||
522 | /* | ||
523 | * pHyp doesn't allow to enable IO or DMA on unfrozen PE. | ||
524 | * Also, it's pointless to enable them on unfrozen PE. So | ||
525 | * we have the check here. | ||
526 | */ | ||
527 | if (function == EEH_OPT_THAW_MMIO || | ||
528 | function == EEH_OPT_THAW_DMA) { | ||
529 | rc = eeh_ops->get_state(pe, NULL); | ||
530 | if (rc < 0) | ||
531 | return rc; | ||
532 | |||
533 | /* No need to enable, or already enabled */ | ||
534 | if ((rc == EEH_STATE_NOT_SUPPORT) || | ||
535 | ((rc & flags) == flags)) | ||
536 | return 0; | ||
537 | } | ||
519 | 538 | ||
520 | rc = eeh_ops->set_option(pe, function); | 539 | rc = eeh_ops->set_option(pe, function); |
521 | if (rc) | 540 | if (rc) |
522 | pr_warning("%s: Unexpected state change %d on PHB#%d-PE#%x, err=%d\n", | 541 | pr_warn("%s: Unexpected state change %d on " |
523 | __func__, function, pe->phb->global_number, pe->addr, rc); | 542 | "PHB#%d-PE#%x, err=%d\n", |
543 | __func__, function, pe->phb->global_number, | ||
544 | pe->addr, rc); | ||
524 | 545 | ||
525 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); | 546 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); |
526 | if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) && | 547 | if (rc <= 0) |
527 | (function == EEH_OPT_THAW_MMIO)) | 548 | return rc; |
549 | |||
550 | if ((function == EEH_OPT_THAW_MMIO) && | ||
551 | (rc & EEH_STATE_MMIO_ENABLED)) | ||
552 | return 0; | ||
553 | |||
554 | if ((function == EEH_OPT_THAW_DMA) && | ||
555 | (rc & EEH_STATE_DMA_ENABLED)) | ||
528 | return 0; | 556 | return 0; |
529 | 557 | ||
530 | return rc; | 558 | return rc; |
@@ -612,26 +640,7 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) | |||
612 | else | 640 | else |
613 | eeh_ops->reset(pe, EEH_RESET_HOT); | 641 | eeh_ops->reset(pe, EEH_RESET_HOT); |
614 | 642 | ||
615 | /* The PCI bus requires that the reset be held high for at least | ||
616 | * a 100 milliseconds. We wait a bit longer 'just in case'. | ||
617 | */ | ||
618 | #define PCI_BUS_RST_HOLD_TIME_MSEC 250 | ||
619 | msleep(PCI_BUS_RST_HOLD_TIME_MSEC); | ||
620 | |||
621 | /* We might get hit with another EEH freeze as soon as the | ||
622 | * pci slot reset line is dropped. Make sure we don't miss | ||
623 | * these, and clear the flag now. | ||
624 | */ | ||
625 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); | ||
626 | |||
627 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); | 643 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); |
628 | |||
629 | /* After a PCI slot has been reset, the PCI Express spec requires | ||
630 | * a 1.5 second idle time for the bus to stabilize, before starting | ||
631 | * up traffic. | ||
632 | */ | ||
633 | #define PCI_BUS_SETTLE_TIME_MSEC 1800 | ||
634 | msleep(PCI_BUS_SETTLE_TIME_MSEC); | ||
635 | } | 644 | } |
636 | 645 | ||
637 | /** | 646 | /** |
@@ -651,6 +660,10 @@ int eeh_reset_pe(struct eeh_pe *pe) | |||
651 | for (i=0; i<3; i++) { | 660 | for (i=0; i<3; i++) { |
652 | eeh_reset_pe_once(pe); | 661 | eeh_reset_pe_once(pe); |
653 | 662 | ||
663 | /* | ||
664 | * EEH_PE_ISOLATED is expected to be removed after | ||
665 | * BAR restore. | ||
666 | */ | ||
654 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); | 667 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); |
655 | if ((rc & flags) == flags) | 668 | if ((rc & flags) == flags) |
656 | return 0; | 669 | return 0; |
@@ -826,8 +839,8 @@ int eeh_init(void) | |||
826 | &hose_list, list_node) | 839 | &hose_list, list_node) |
827 | pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL); | 840 | pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL); |
828 | } else { | 841 | } else { |
829 | pr_warning("%s: Invalid probe mode %d\n", | 842 | pr_warn("%s: Invalid probe mode %x", |
830 | __func__, eeh_probe_mode); | 843 | __func__, eeh_subsystem_flags); |
831 | return -EINVAL; | 844 | return -EINVAL; |
832 | } | 845 | } |
833 | 846 | ||
@@ -1102,10 +1115,45 @@ static const struct file_operations proc_eeh_operations = { | |||
1102 | .release = single_release, | 1115 | .release = single_release, |
1103 | }; | 1116 | }; |
1104 | 1117 | ||
1118 | #ifdef CONFIG_DEBUG_FS | ||
1119 | static int eeh_enable_dbgfs_set(void *data, u64 val) | ||
1120 | { | ||
1121 | if (val) | ||
1122 | eeh_subsystem_flags &= ~EEH_FORCE_DISABLED; | ||
1123 | else | ||
1124 | eeh_subsystem_flags |= EEH_FORCE_DISABLED; | ||
1125 | |||
1126 | /* Notify the backend */ | ||
1127 | if (eeh_ops->post_init) | ||
1128 | eeh_ops->post_init(); | ||
1129 | |||
1130 | return 0; | ||
1131 | } | ||
1132 | |||
1133 | static int eeh_enable_dbgfs_get(void *data, u64 *val) | ||
1134 | { | ||
1135 | if (eeh_enabled()) | ||
1136 | *val = 0x1ul; | ||
1137 | else | ||
1138 | *val = 0x0ul; | ||
1139 | return 0; | ||
1140 | } | ||
1141 | |||
1142 | DEFINE_SIMPLE_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get, | ||
1143 | eeh_enable_dbgfs_set, "0x%llx\n"); | ||
1144 | #endif | ||
1145 | |||
1105 | static int __init eeh_init_proc(void) | 1146 | static int __init eeh_init_proc(void) |
1106 | { | 1147 | { |
1107 | if (machine_is(pseries) || machine_is(powernv)) | 1148 | if (machine_is(pseries) || machine_is(powernv)) { |
1108 | proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); | 1149 | proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); |
1150 | #ifdef CONFIG_DEBUG_FS | ||
1151 | debugfs_create_file("eeh_enable", 0600, | ||
1152 | powerpc_debugfs_root, NULL, | ||
1153 | &eeh_enable_dbgfs_ops); | ||
1154 | #endif | ||
1155 | } | ||
1156 | |||
1109 | return 0; | 1157 | return 0; |
1110 | } | 1158 | } |
1111 | __initcall(eeh_init_proc); | 1159 | __initcall(eeh_init_proc); |
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index bb61ca58ca6d..7100a5b96e70 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -171,6 +171,15 @@ static void eeh_enable_irq(struct pci_dev *dev) | |||
171 | } | 171 | } |
172 | } | 172 | } |
173 | 173 | ||
174 | static bool eeh_dev_removed(struct eeh_dev *edev) | ||
175 | { | ||
176 | /* EEH device removed ? */ | ||
177 | if (!edev || (edev->mode & EEH_DEV_REMOVED)) | ||
178 | return true; | ||
179 | |||
180 | return false; | ||
181 | } | ||
182 | |||
174 | /** | 183 | /** |
175 | * eeh_report_error - Report pci error to each device driver | 184 | * eeh_report_error - Report pci error to each device driver |
176 | * @data: eeh device | 185 | * @data: eeh device |
@@ -187,10 +196,8 @@ static void *eeh_report_error(void *data, void *userdata) | |||
187 | enum pci_ers_result rc, *res = userdata; | 196 | enum pci_ers_result rc, *res = userdata; |
188 | struct pci_driver *driver; | 197 | struct pci_driver *driver; |
189 | 198 | ||
190 | /* We might not have the associated PCI device, | 199 | if (!dev || eeh_dev_removed(edev)) |
191 | * then we should continue for next one. | 200 | return NULL; |
192 | */ | ||
193 | if (!dev) return NULL; | ||
194 | dev->error_state = pci_channel_io_frozen; | 201 | dev->error_state = pci_channel_io_frozen; |
195 | 202 | ||
196 | driver = eeh_pcid_get(dev); | 203 | driver = eeh_pcid_get(dev); |
@@ -230,6 +237,9 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata) | |||
230 | enum pci_ers_result rc, *res = userdata; | 237 | enum pci_ers_result rc, *res = userdata; |
231 | struct pci_driver *driver; | 238 | struct pci_driver *driver; |
232 | 239 | ||
240 | if (!dev || eeh_dev_removed(edev)) | ||
241 | return NULL; | ||
242 | |||
233 | driver = eeh_pcid_get(dev); | 243 | driver = eeh_pcid_get(dev); |
234 | if (!driver) return NULL; | 244 | if (!driver) return NULL; |
235 | 245 | ||
@@ -267,7 +277,8 @@ static void *eeh_report_reset(void *data, void *userdata) | |||
267 | enum pci_ers_result rc, *res = userdata; | 277 | enum pci_ers_result rc, *res = userdata; |
268 | struct pci_driver *driver; | 278 | struct pci_driver *driver; |
269 | 279 | ||
270 | if (!dev) return NULL; | 280 | if (!dev || eeh_dev_removed(edev)) |
281 | return NULL; | ||
271 | dev->error_state = pci_channel_io_normal; | 282 | dev->error_state = pci_channel_io_normal; |
272 | 283 | ||
273 | driver = eeh_pcid_get(dev); | 284 | driver = eeh_pcid_get(dev); |
@@ -307,7 +318,8 @@ static void *eeh_report_resume(void *data, void *userdata) | |||
307 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); | 318 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); |
308 | struct pci_driver *driver; | 319 | struct pci_driver *driver; |
309 | 320 | ||
310 | if (!dev) return NULL; | 321 | if (!dev || eeh_dev_removed(edev)) |
322 | return NULL; | ||
311 | dev->error_state = pci_channel_io_normal; | 323 | dev->error_state = pci_channel_io_normal; |
312 | 324 | ||
313 | driver = eeh_pcid_get(dev); | 325 | driver = eeh_pcid_get(dev); |
@@ -343,7 +355,8 @@ static void *eeh_report_failure(void *data, void *userdata) | |||
343 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); | 355 | struct pci_dev *dev = eeh_dev_to_pci_dev(edev); |
344 | struct pci_driver *driver; | 356 | struct pci_driver *driver; |
345 | 357 | ||
346 | if (!dev) return NULL; | 358 | if (!dev || eeh_dev_removed(edev)) |
359 | return NULL; | ||
347 | dev->error_state = pci_channel_io_perm_failure; | 360 | dev->error_state = pci_channel_io_perm_failure; |
348 | 361 | ||
349 | driver = eeh_pcid_get(dev); | 362 | driver = eeh_pcid_get(dev); |
@@ -380,6 +393,16 @@ static void *eeh_rmv_device(void *data, void *userdata) | |||
380 | if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) | 393 | if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) |
381 | return NULL; | 394 | return NULL; |
382 | 395 | ||
396 | /* | ||
397 | * We rely on count-based pcibios_release_device() to | ||
398 | * detach permanently offlined PEs. Unfortunately, that's | ||
399 | * not reliable enough. We might have the permanently | ||
400 | * offlined PEs attached, but we needn't take care of | ||
401 | * them and their child devices. | ||
402 | */ | ||
403 | if (eeh_dev_removed(edev)) | ||
404 | return NULL; | ||
405 | |||
383 | driver = eeh_pcid_get(dev); | 406 | driver = eeh_pcid_get(dev); |
384 | if (driver) { | 407 | if (driver) { |
385 | eeh_pcid_put(dev); | 408 | eeh_pcid_put(dev); |
@@ -417,6 +440,36 @@ static void *eeh_pe_detach_dev(void *data, void *userdata) | |||
417 | return NULL; | 440 | return NULL; |
418 | } | 441 | } |
419 | 442 | ||
443 | /* | ||
444 | * Explicitly clear PE's frozen state for PowerNV where | ||
445 | * we have frozen PE until BAR restore is completed. It's | ||
446 | * harmless to clear it for pSeries. To be consistent with | ||
447 | * PE reset (for 3 times), we try to clear the frozen state | ||
448 | * for 3 times as well. | ||
449 | */ | ||
450 | static int eeh_clear_pe_frozen_state(struct eeh_pe *pe) | ||
451 | { | ||
452 | int i, rc; | ||
453 | |||
454 | for (i = 0; i < 3; i++) { | ||
455 | rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); | ||
456 | if (rc) | ||
457 | continue; | ||
458 | rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); | ||
459 | if (!rc) | ||
460 | break; | ||
461 | } | ||
462 | |||
463 | /* The PE has been isolated, clear it */ | ||
464 | if (rc) | ||
465 | pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n", | ||
466 | __func__, pe->phb->global_number, pe->addr, rc); | ||
467 | else | ||
468 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); | ||
469 | |||
470 | return rc; | ||
471 | } | ||
472 | |||
420 | /** | 473 | /** |
421 | * eeh_reset_device - Perform actual reset of a pci slot | 474 | * eeh_reset_device - Perform actual reset of a pci slot |
422 | * @pe: EEH PE | 475 | * @pe: EEH PE |
@@ -451,19 +504,33 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) | |||
451 | eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed); | 504 | eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed); |
452 | } | 505 | } |
453 | 506 | ||
454 | /* Reset the pci controller. (Asserts RST#; resets config space). | 507 | /* |
508 | * Reset the pci controller. (Asserts RST#; resets config space). | ||
455 | * Reconfigure bridges and devices. Don't try to bring the system | 509 | * Reconfigure bridges and devices. Don't try to bring the system |
456 | * up if the reset failed for some reason. | 510 | * up if the reset failed for some reason. |
511 | * | ||
512 | * During the reset, it's very dangerous to have uncontrolled PCI | ||
513 | * config accesses. So we prefer to block them. However, controlled | ||
514 | * PCI config accesses initiated from EEH itself are allowed. | ||
457 | */ | 515 | */ |
516 | eeh_pe_state_mark(pe, EEH_PE_RESET); | ||
458 | rc = eeh_reset_pe(pe); | 517 | rc = eeh_reset_pe(pe); |
459 | if (rc) | 518 | if (rc) { |
519 | eeh_pe_state_clear(pe, EEH_PE_RESET); | ||
460 | return rc; | 520 | return rc; |
521 | } | ||
461 | 522 | ||
462 | pci_lock_rescan_remove(); | 523 | pci_lock_rescan_remove(); |
463 | 524 | ||
464 | /* Restore PE */ | 525 | /* Restore PE */ |
465 | eeh_ops->configure_bridge(pe); | 526 | eeh_ops->configure_bridge(pe); |
466 | eeh_pe_restore_bars(pe); | 527 | eeh_pe_restore_bars(pe); |
528 | eeh_pe_state_clear(pe, EEH_PE_RESET); | ||
529 | |||
530 | /* Clear frozen state */ | ||
531 | rc = eeh_clear_pe_frozen_state(pe); | ||
532 | if (rc) | ||
533 | return rc; | ||
467 | 534 | ||
468 | /* Give the system 5 seconds to finish running the user-space | 535 | /* Give the system 5 seconds to finish running the user-space |
469 | * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, | 536 | * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, |
@@ -573,7 +640,6 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) | |||
573 | result = PCI_ERS_RESULT_NEED_RESET; | 640 | result = PCI_ERS_RESULT_NEED_RESET; |
574 | } else { | 641 | } else { |
575 | pr_info("EEH: Notify device drivers to resume I/O\n"); | 642 | pr_info("EEH: Notify device drivers to resume I/O\n"); |
576 | result = PCI_ERS_RESULT_NONE; | ||
577 | eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result); | 643 | eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result); |
578 | } | 644 | } |
579 | } | 645 | } |
@@ -585,10 +651,17 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) | |||
585 | 651 | ||
586 | if (rc < 0) | 652 | if (rc < 0) |
587 | goto hard_fail; | 653 | goto hard_fail; |
588 | if (rc) | 654 | if (rc) { |
589 | result = PCI_ERS_RESULT_NEED_RESET; | 655 | result = PCI_ERS_RESULT_NEED_RESET; |
590 | else | 656 | } else { |
657 | /* | ||
658 | * We didn't do PE reset for the case. The PE | ||
659 | * is still in frozen state. Clear it before | ||
660 | * resuming the PE. | ||
661 | */ | ||
662 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); | ||
591 | result = PCI_ERS_RESULT_RECOVERED; | 663 | result = PCI_ERS_RESULT_RECOVERED; |
664 | } | ||
592 | } | 665 | } |
593 | 666 | ||
594 | /* If any device has a hard failure, then shut off everything. */ | 667 | /* If any device has a hard failure, then shut off everything. */ |
@@ -650,8 +723,17 @@ perm_error: | |||
650 | /* Notify all devices that they're about to go down. */ | 723 | /* Notify all devices that they're about to go down. */ |
651 | eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); | 724 | eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); |
652 | 725 | ||
653 | /* Shut down the device drivers for good. */ | 726 | /* Mark the PE to be removed permanently */ |
727 | pe->freeze_count = EEH_MAX_ALLOWED_FREEZES + 1; | ||
728 | |||
729 | /* | ||
730 | * Shut down the device drivers for good. We mark | ||
731 | * all removed devices correctly to avoid access | ||
732 | * the their PCI config any more. | ||
733 | */ | ||
654 | if (frozen_bus) { | 734 | if (frozen_bus) { |
735 | eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); | ||
736 | |||
655 | pci_lock_rescan_remove(); | 737 | pci_lock_rescan_remove(); |
656 | pcibios_remove_pci_devices(frozen_bus); | 738 | pcibios_remove_pci_devices(frozen_bus); |
657 | pci_unlock_rescan_remove(); | 739 | pci_unlock_rescan_remove(); |
@@ -682,8 +764,7 @@ static void eeh_handle_special_event(void) | |||
682 | phb_pe = eeh_phb_pe_get(hose); | 764 | phb_pe = eeh_phb_pe_get(hose); |
683 | if (!phb_pe) continue; | 765 | if (!phb_pe) continue; |
684 | 766 | ||
685 | eeh_pe_state_mark(phb_pe, | 767 | eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); |
686 | EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); | ||
687 | } | 768 | } |
688 | 769 | ||
689 | eeh_serialize_unlock(flags); | 770 | eeh_serialize_unlock(flags); |
@@ -699,8 +780,7 @@ static void eeh_handle_special_event(void) | |||
699 | eeh_remove_event(pe); | 780 | eeh_remove_event(pe); |
700 | 781 | ||
701 | if (rc == EEH_NEXT_ERR_DEAD_PHB) | 782 | if (rc == EEH_NEXT_ERR_DEAD_PHB) |
702 | eeh_pe_state_mark(pe, | 783 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); |
703 | EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); | ||
704 | else | 784 | else |
705 | eeh_pe_state_mark(pe, | 785 | eeh_pe_state_mark(pe, |
706 | EEH_PE_ISOLATED | EEH_PE_RECOVERING); | 786 | EEH_PE_ISOLATED | EEH_PE_RECOVERING); |
@@ -724,12 +804,14 @@ static void eeh_handle_special_event(void) | |||
724 | if (rc == EEH_NEXT_ERR_FROZEN_PE || | 804 | if (rc == EEH_NEXT_ERR_FROZEN_PE || |
725 | rc == EEH_NEXT_ERR_FENCED_PHB) { | 805 | rc == EEH_NEXT_ERR_FENCED_PHB) { |
726 | eeh_handle_normal_event(pe); | 806 | eeh_handle_normal_event(pe); |
807 | eeh_pe_state_clear(pe, EEH_PE_RECOVERING); | ||
727 | } else { | 808 | } else { |
728 | pci_lock_rescan_remove(); | 809 | pci_lock_rescan_remove(); |
729 | list_for_each_entry(hose, &hose_list, list_node) { | 810 | list_for_each_entry(hose, &hose_list, list_node) { |
730 | phb_pe = eeh_phb_pe_get(hose); | 811 | phb_pe = eeh_phb_pe_get(hose); |
731 | if (!phb_pe || | 812 | if (!phb_pe || |
732 | !(phb_pe->state & EEH_PE_PHB_DEAD)) | 813 | !(phb_pe->state & EEH_PE_ISOLATED) || |
814 | (phb_pe->state & EEH_PE_RECOVERING)) | ||
733 | continue; | 815 | continue; |
734 | 816 | ||
735 | /* Notify all devices to be down */ | 817 | /* Notify all devices to be down */ |
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index f0c353fa655a..995c2a284630 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c | |||
@@ -503,13 +503,17 @@ static void *__eeh_pe_state_mark(void *data, void *flag) | |||
503 | struct eeh_dev *edev, *tmp; | 503 | struct eeh_dev *edev, *tmp; |
504 | struct pci_dev *pdev; | 504 | struct pci_dev *pdev; |
505 | 505 | ||
506 | /* | 506 | /* Keep the state of permanently removed PE intact */ |
507 | * Mark the PE with the indicated state. Also, | 507 | if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) && |
508 | * the associated PCI device will be put into | 508 | (state & (EEH_PE_ISOLATED | EEH_PE_RECOVERING))) |
509 | * I/O frozen state to avoid I/O accesses from | 509 | return NULL; |
510 | * the PCI device driver. | 510 | |
511 | */ | ||
512 | pe->state |= state; | 511 | pe->state |= state; |
512 | |||
513 | /* Offline PCI devices if applicable */ | ||
514 | if (state != EEH_PE_ISOLATED) | ||
515 | return NULL; | ||
516 | |||
513 | eeh_pe_for_each_dev(pe, edev, tmp) { | 517 | eeh_pe_for_each_dev(pe, edev, tmp) { |
514 | pdev = eeh_dev_to_pci_dev(edev); | 518 | pdev = eeh_dev_to_pci_dev(edev); |
515 | if (pdev) | 519 | if (pdev) |
@@ -532,6 +536,27 @@ void eeh_pe_state_mark(struct eeh_pe *pe, int state) | |||
532 | eeh_pe_traverse(pe, __eeh_pe_state_mark, &state); | 536 | eeh_pe_traverse(pe, __eeh_pe_state_mark, &state); |
533 | } | 537 | } |
534 | 538 | ||
539 | static void *__eeh_pe_dev_mode_mark(void *data, void *flag) | ||
540 | { | ||
541 | struct eeh_dev *edev = data; | ||
542 | int mode = *((int *)flag); | ||
543 | |||
544 | edev->mode |= mode; | ||
545 | |||
546 | return NULL; | ||
547 | } | ||
548 | |||
549 | /** | ||
550 | * eeh_pe_dev_state_mark - Mark state for all device under the PE | ||
551 | * @pe: EEH PE | ||
552 | * | ||
553 | * Mark specific state for all child devices of the PE. | ||
554 | */ | ||
555 | void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode) | ||
556 | { | ||
557 | eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode); | ||
558 | } | ||
559 | |||
535 | /** | 560 | /** |
536 | * __eeh_pe_state_clear - Clear state for the PE | 561 | * __eeh_pe_state_clear - Clear state for the PE |
537 | * @data: EEH PE | 562 | * @data: EEH PE |
@@ -546,8 +571,16 @@ static void *__eeh_pe_state_clear(void *data, void *flag) | |||
546 | struct eeh_pe *pe = (struct eeh_pe *)data; | 571 | struct eeh_pe *pe = (struct eeh_pe *)data; |
547 | int state = *((int *)flag); | 572 | int state = *((int *)flag); |
548 | 573 | ||
574 | /* Keep the state of permanently removed PE intact */ | ||
575 | if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) && | ||
576 | (state & EEH_PE_ISOLATED)) | ||
577 | return NULL; | ||
578 | |||
549 | pe->state &= ~state; | 579 | pe->state &= ~state; |
550 | pe->check_count = 0; | 580 | |
581 | /* Clear check count since last isolation */ | ||
582 | if (state & EEH_PE_ISOLATED) | ||
583 | pe->check_count = 0; | ||
551 | 584 | ||
552 | return NULL; | 585 | return NULL; |
553 | } | 586 | } |
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index 5d753d4f2c75..e2595ba4b720 100644 --- a/arch/powerpc/kernel/eeh_sysfs.c +++ b/arch/powerpc/kernel/eeh_sysfs.c | |||
@@ -59,6 +59,9 @@ void eeh_sysfs_add_device(struct pci_dev *pdev) | |||
59 | struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); | 59 | struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); |
60 | int rc=0; | 60 | int rc=0; |
61 | 61 | ||
62 | if (!eeh_enabled()) | ||
63 | return; | ||
64 | |||
62 | if (edev && (edev->mode & EEH_DEV_SYSFS)) | 65 | if (edev && (edev->mode & EEH_DEV_SYSFS)) |
63 | return; | 66 | return; |
64 | 67 | ||
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 662c6dd98072..911d45366f59 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -39,8 +39,8 @@ | |||
39 | * System calls. | 39 | * System calls. |
40 | */ | 40 | */ |
41 | .section ".toc","aw" | 41 | .section ".toc","aw" |
42 | .SYS_CALL_TABLE: | 42 | SYS_CALL_TABLE: |
43 | .tc .sys_call_table[TC],.sys_call_table | 43 | .tc sys_call_table[TC],sys_call_table |
44 | 44 | ||
45 | /* This value is used to mark exception frames on the stack. */ | 45 | /* This value is used to mark exception frames on the stack. */ |
46 | exception_marker: | 46 | exception_marker: |
@@ -106,7 +106,7 @@ BEGIN_FW_FTR_SECTION | |||
106 | LDX_BE r10,0,r10 /* get log write index */ | 106 | LDX_BE r10,0,r10 /* get log write index */ |
107 | cmpd cr1,r11,r10 | 107 | cmpd cr1,r11,r10 |
108 | beq+ cr1,33f | 108 | beq+ cr1,33f |
109 | bl .accumulate_stolen_time | 109 | bl accumulate_stolen_time |
110 | REST_GPR(0,r1) | 110 | REST_GPR(0,r1) |
111 | REST_4GPRS(3,r1) | 111 | REST_4GPRS(3,r1) |
112 | REST_2GPRS(7,r1) | 112 | REST_2GPRS(7,r1) |
@@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
143 | std r10,SOFTE(r1) | 143 | std r10,SOFTE(r1) |
144 | 144 | ||
145 | #ifdef SHOW_SYSCALLS | 145 | #ifdef SHOW_SYSCALLS |
146 | bl .do_show_syscall | 146 | bl do_show_syscall |
147 | REST_GPR(0,r1) | 147 | REST_GPR(0,r1) |
148 | REST_4GPRS(3,r1) | 148 | REST_4GPRS(3,r1) |
149 | REST_2GPRS(7,r1) | 149 | REST_2GPRS(7,r1) |
@@ -162,7 +162,7 @@ system_call: /* label this so stack traces look sane */ | |||
162 | * Need to vector to 32 Bit or default sys_call_table here, | 162 | * Need to vector to 32 Bit or default sys_call_table here, |
163 | * based on caller's run-mode / personality. | 163 | * based on caller's run-mode / personality. |
164 | */ | 164 | */ |
165 | ld r11,.SYS_CALL_TABLE@toc(2) | 165 | ld r11,SYS_CALL_TABLE@toc(2) |
166 | andi. r10,r10,_TIF_32BIT | 166 | andi. r10,r10,_TIF_32BIT |
167 | beq 15f | 167 | beq 15f |
168 | addi r11,r11,8 /* use 32-bit syscall entries */ | 168 | addi r11,r11,8 /* use 32-bit syscall entries */ |
@@ -174,14 +174,14 @@ system_call: /* label this so stack traces look sane */ | |||
174 | clrldi r8,r8,32 | 174 | clrldi r8,r8,32 |
175 | 15: | 175 | 15: |
176 | slwi r0,r0,4 | 176 | slwi r0,r0,4 |
177 | ldx r10,r11,r0 /* Fetch system call handler [ptr] */ | 177 | ldx r12,r11,r0 /* Fetch system call handler [ptr] */ |
178 | mtctr r10 | 178 | mtctr r12 |
179 | bctrl /* Call handler */ | 179 | bctrl /* Call handler */ |
180 | 180 | ||
181 | syscall_exit: | 181 | syscall_exit: |
182 | std r3,RESULT(r1) | 182 | std r3,RESULT(r1) |
183 | #ifdef SHOW_SYSCALLS | 183 | #ifdef SHOW_SYSCALLS |
184 | bl .do_show_syscall_exit | 184 | bl do_show_syscall_exit |
185 | ld r3,RESULT(r1) | 185 | ld r3,RESULT(r1) |
186 | #endif | 186 | #endif |
187 | CURRENT_THREAD_INFO(r12, r1) | 187 | CURRENT_THREAD_INFO(r12, r1) |
@@ -248,9 +248,9 @@ syscall_error: | |||
248 | 248 | ||
249 | /* Traced system call support */ | 249 | /* Traced system call support */ |
250 | syscall_dotrace: | 250 | syscall_dotrace: |
251 | bl .save_nvgprs | 251 | bl save_nvgprs |
252 | addi r3,r1,STACK_FRAME_OVERHEAD | 252 | addi r3,r1,STACK_FRAME_OVERHEAD |
253 | bl .do_syscall_trace_enter | 253 | bl do_syscall_trace_enter |
254 | /* | 254 | /* |
255 | * Restore argument registers possibly just changed. | 255 | * Restore argument registers possibly just changed. |
256 | * We use the return value of do_syscall_trace_enter | 256 | * We use the return value of do_syscall_trace_enter |
@@ -308,7 +308,7 @@ syscall_exit_work: | |||
308 | 4: /* Anything else left to do? */ | 308 | 4: /* Anything else left to do? */ |
309 | SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ | 309 | SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ |
310 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | 310 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) |
311 | beq .ret_from_except_lite | 311 | beq ret_from_except_lite |
312 | 312 | ||
313 | /* Re-enable interrupts */ | 313 | /* Re-enable interrupts */ |
314 | #ifdef CONFIG_PPC_BOOK3E | 314 | #ifdef CONFIG_PPC_BOOK3E |
@@ -319,10 +319,10 @@ syscall_exit_work: | |||
319 | mtmsrd r10,1 | 319 | mtmsrd r10,1 |
320 | #endif /* CONFIG_PPC_BOOK3E */ | 320 | #endif /* CONFIG_PPC_BOOK3E */ |
321 | 321 | ||
322 | bl .save_nvgprs | 322 | bl save_nvgprs |
323 | addi r3,r1,STACK_FRAME_OVERHEAD | 323 | addi r3,r1,STACK_FRAME_OVERHEAD |
324 | bl .do_syscall_trace_leave | 324 | bl do_syscall_trace_leave |
325 | b .ret_from_except | 325 | b ret_from_except |
326 | 326 | ||
327 | /* Save non-volatile GPRs, if not already saved. */ | 327 | /* Save non-volatile GPRs, if not already saved. */ |
328 | _GLOBAL(save_nvgprs) | 328 | _GLOBAL(save_nvgprs) |
@@ -345,52 +345,48 @@ _GLOBAL(save_nvgprs) | |||
345 | */ | 345 | */ |
346 | 346 | ||
347 | _GLOBAL(ppc_fork) | 347 | _GLOBAL(ppc_fork) |
348 | bl .save_nvgprs | 348 | bl save_nvgprs |
349 | bl .sys_fork | 349 | bl sys_fork |
350 | b syscall_exit | 350 | b syscall_exit |
351 | 351 | ||
352 | _GLOBAL(ppc_vfork) | 352 | _GLOBAL(ppc_vfork) |
353 | bl .save_nvgprs | 353 | bl save_nvgprs |
354 | bl .sys_vfork | 354 | bl sys_vfork |
355 | b syscall_exit | 355 | b syscall_exit |
356 | 356 | ||
357 | _GLOBAL(ppc_clone) | 357 | _GLOBAL(ppc_clone) |
358 | bl .save_nvgprs | 358 | bl save_nvgprs |
359 | bl .sys_clone | 359 | bl sys_clone |
360 | b syscall_exit | 360 | b syscall_exit |
361 | 361 | ||
362 | _GLOBAL(ppc32_swapcontext) | 362 | _GLOBAL(ppc32_swapcontext) |
363 | bl .save_nvgprs | 363 | bl save_nvgprs |
364 | bl .compat_sys_swapcontext | 364 | bl compat_sys_swapcontext |
365 | b syscall_exit | 365 | b syscall_exit |
366 | 366 | ||
367 | _GLOBAL(ppc64_swapcontext) | 367 | _GLOBAL(ppc64_swapcontext) |
368 | bl .save_nvgprs | 368 | bl save_nvgprs |
369 | bl .sys_swapcontext | 369 | bl sys_swapcontext |
370 | b syscall_exit | 370 | b syscall_exit |
371 | 371 | ||
372 | _GLOBAL(ret_from_fork) | 372 | _GLOBAL(ret_from_fork) |
373 | bl .schedule_tail | 373 | bl schedule_tail |
374 | REST_NVGPRS(r1) | 374 | REST_NVGPRS(r1) |
375 | li r3,0 | 375 | li r3,0 |
376 | b syscall_exit | 376 | b syscall_exit |
377 | 377 | ||
378 | _GLOBAL(ret_from_kernel_thread) | 378 | _GLOBAL(ret_from_kernel_thread) |
379 | bl .schedule_tail | 379 | bl schedule_tail |
380 | REST_NVGPRS(r1) | 380 | REST_NVGPRS(r1) |
381 | ld r14, 0(r14) | ||
382 | mtlr r14 | 381 | mtlr r14 |
383 | mr r3,r15 | 382 | mr r3,r15 |
383 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
384 | mr r12,r14 | ||
385 | #endif | ||
384 | blrl | 386 | blrl |
385 | li r3,0 | 387 | li r3,0 |
386 | b syscall_exit | 388 | b syscall_exit |
387 | 389 | ||
388 | .section ".toc","aw" | ||
389 | DSCR_DEFAULT: | ||
390 | .tc dscr_default[TC],dscr_default | ||
391 | |||
392 | .section ".text" | ||
393 | |||
394 | /* | 390 | /* |
395 | * This routine switches between two different tasks. The process | 391 | * This routine switches between two different tasks. The process |
396 | * state of one is saved on its kernel stack. Then the state | 392 | * state of one is saved on its kernel stack. Then the state |
@@ -575,11 +571,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
575 | #ifdef CONFIG_PPC64 | 571 | #ifdef CONFIG_PPC64 |
576 | BEGIN_FTR_SECTION | 572 | BEGIN_FTR_SECTION |
577 | lwz r6,THREAD_DSCR_INHERIT(r4) | 573 | lwz r6,THREAD_DSCR_INHERIT(r4) |
578 | ld r7,DSCR_DEFAULT@toc(2) | ||
579 | ld r0,THREAD_DSCR(r4) | 574 | ld r0,THREAD_DSCR(r4) |
580 | cmpwi r6,0 | 575 | cmpwi r6,0 |
581 | bne 1f | 576 | bne 1f |
582 | ld r0,0(r7) | 577 | ld r0,PACA_DSCR(r13) |
583 | 1: | 578 | 1: |
584 | BEGIN_FTR_SECTION_NESTED(70) | 579 | BEGIN_FTR_SECTION_NESTED(70) |
585 | mfspr r8, SPRN_FSCR | 580 | mfspr r8, SPRN_FSCR |
@@ -611,7 +606,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) | |||
611 | _GLOBAL(ret_from_except) | 606 | _GLOBAL(ret_from_except) |
612 | ld r11,_TRAP(r1) | 607 | ld r11,_TRAP(r1) |
613 | andi. r0,r11,1 | 608 | andi. r0,r11,1 |
614 | bne .ret_from_except_lite | 609 | bne ret_from_except_lite |
615 | REST_NVGPRS(r1) | 610 | REST_NVGPRS(r1) |
616 | 611 | ||
617 | _GLOBAL(ret_from_except_lite) | 612 | _GLOBAL(ret_from_except_lite) |
@@ -661,23 +656,23 @@ _GLOBAL(ret_from_except_lite) | |||
661 | #endif | 656 | #endif |
662 | 1: andi. r0,r4,_TIF_NEED_RESCHED | 657 | 1: andi. r0,r4,_TIF_NEED_RESCHED |
663 | beq 2f | 658 | beq 2f |
664 | bl .restore_interrupts | 659 | bl restore_interrupts |
665 | SCHEDULE_USER | 660 | SCHEDULE_USER |
666 | b .ret_from_except_lite | 661 | b ret_from_except_lite |
667 | 2: | 662 | 2: |
668 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 663 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
669 | andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM | 664 | andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM |
670 | bne 3f /* only restore TM if nothing else to do */ | 665 | bne 3f /* only restore TM if nothing else to do */ |
671 | addi r3,r1,STACK_FRAME_OVERHEAD | 666 | addi r3,r1,STACK_FRAME_OVERHEAD |
672 | bl .restore_tm_state | 667 | bl restore_tm_state |
673 | b restore | 668 | b restore |
674 | 3: | 669 | 3: |
675 | #endif | 670 | #endif |
676 | bl .save_nvgprs | 671 | bl save_nvgprs |
677 | bl .restore_interrupts | 672 | bl restore_interrupts |
678 | addi r3,r1,STACK_FRAME_OVERHEAD | 673 | addi r3,r1,STACK_FRAME_OVERHEAD |
679 | bl .do_notify_resume | 674 | bl do_notify_resume |
680 | b .ret_from_except | 675 | b ret_from_except |
681 | 676 | ||
682 | resume_kernel: | 677 | resume_kernel: |
683 | /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ | 678 | /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ |
@@ -730,7 +725,7 @@ resume_kernel: | |||
730 | * sure we are soft-disabled first and reconcile irq state. | 725 | * sure we are soft-disabled first and reconcile irq state. |
731 | */ | 726 | */ |
732 | RECONCILE_IRQ_STATE(r3,r4) | 727 | RECONCILE_IRQ_STATE(r3,r4) |
733 | 1: bl .preempt_schedule_irq | 728 | 1: bl preempt_schedule_irq |
734 | 729 | ||
735 | /* Re-test flags and eventually loop */ | 730 | /* Re-test flags and eventually loop */ |
736 | CURRENT_THREAD_INFO(r9, r1) | 731 | CURRENT_THREAD_INFO(r9, r1) |
@@ -792,7 +787,7 @@ restore_no_replay: | |||
792 | */ | 787 | */ |
793 | do_restore: | 788 | do_restore: |
794 | #ifdef CONFIG_PPC_BOOK3E | 789 | #ifdef CONFIG_PPC_BOOK3E |
795 | b .exception_return_book3e | 790 | b exception_return_book3e |
796 | #else | 791 | #else |
797 | /* | 792 | /* |
798 | * Clear the reservation. If we know the CPU tracks the address of | 793 | * Clear the reservation. If we know the CPU tracks the address of |
@@ -907,7 +902,7 @@ restore_check_irq_replay: | |||
907 | * | 902 | * |
908 | * Still, this might be useful for things like hash_page | 903 | * Still, this might be useful for things like hash_page |
909 | */ | 904 | */ |
910 | bl .__check_irq_replay | 905 | bl __check_irq_replay |
911 | cmpwi cr0,r3,0 | 906 | cmpwi cr0,r3,0 |
912 | beq restore_no_replay | 907 | beq restore_no_replay |
913 | 908 | ||
@@ -928,13 +923,13 @@ restore_check_irq_replay: | |||
928 | cmpwi cr0,r3,0x500 | 923 | cmpwi cr0,r3,0x500 |
929 | bne 1f | 924 | bne 1f |
930 | addi r3,r1,STACK_FRAME_OVERHEAD; | 925 | addi r3,r1,STACK_FRAME_OVERHEAD; |
931 | bl .do_IRQ | 926 | bl do_IRQ |
932 | b .ret_from_except | 927 | b ret_from_except |
933 | 1: cmpwi cr0,r3,0x900 | 928 | 1: cmpwi cr0,r3,0x900 |
934 | bne 1f | 929 | bne 1f |
935 | addi r3,r1,STACK_FRAME_OVERHEAD; | 930 | addi r3,r1,STACK_FRAME_OVERHEAD; |
936 | bl .timer_interrupt | 931 | bl timer_interrupt |
937 | b .ret_from_except | 932 | b ret_from_except |
938 | #ifdef CONFIG_PPC_DOORBELL | 933 | #ifdef CONFIG_PPC_DOORBELL |
939 | 1: | 934 | 1: |
940 | #ifdef CONFIG_PPC_BOOK3E | 935 | #ifdef CONFIG_PPC_BOOK3E |
@@ -948,14 +943,14 @@ restore_check_irq_replay: | |||
948 | #endif /* CONFIG_PPC_BOOK3E */ | 943 | #endif /* CONFIG_PPC_BOOK3E */ |
949 | bne 1f | 944 | bne 1f |
950 | addi r3,r1,STACK_FRAME_OVERHEAD; | 945 | addi r3,r1,STACK_FRAME_OVERHEAD; |
951 | bl .doorbell_exception | 946 | bl doorbell_exception |
952 | b .ret_from_except | 947 | b ret_from_except |
953 | #endif /* CONFIG_PPC_DOORBELL */ | 948 | #endif /* CONFIG_PPC_DOORBELL */ |
954 | 1: b .ret_from_except /* What else to do here ? */ | 949 | 1: b ret_from_except /* What else to do here ? */ |
955 | 950 | ||
956 | unrecov_restore: | 951 | unrecov_restore: |
957 | addi r3,r1,STACK_FRAME_OVERHEAD | 952 | addi r3,r1,STACK_FRAME_OVERHEAD |
958 | bl .unrecoverable_exception | 953 | bl unrecoverable_exception |
959 | b unrecov_restore | 954 | b unrecov_restore |
960 | 955 | ||
961 | #ifdef CONFIG_PPC_RTAS | 956 | #ifdef CONFIG_PPC_RTAS |
@@ -1021,7 +1016,7 @@ _GLOBAL(enter_rtas) | |||
1021 | std r6,PACASAVEDMSR(r13) | 1016 | std r6,PACASAVEDMSR(r13) |
1022 | 1017 | ||
1023 | /* Setup our real return addr */ | 1018 | /* Setup our real return addr */ |
1024 | LOAD_REG_ADDR(r4,.rtas_return_loc) | 1019 | LOAD_REG_ADDR(r4,rtas_return_loc) |
1025 | clrldi r4,r4,2 /* convert to realmode address */ | 1020 | clrldi r4,r4,2 /* convert to realmode address */ |
1026 | mtlr r4 | 1021 | mtlr r4 |
1027 | 1022 | ||
@@ -1045,7 +1040,7 @@ _GLOBAL(enter_rtas) | |||
1045 | rfid | 1040 | rfid |
1046 | b . /* prevent speculative execution */ | 1041 | b . /* prevent speculative execution */ |
1047 | 1042 | ||
1048 | _STATIC(rtas_return_loc) | 1043 | rtas_return_loc: |
1049 | FIXUP_ENDIAN | 1044 | FIXUP_ENDIAN |
1050 | 1045 | ||
1051 | /* relocation is off at this point */ | 1046 | /* relocation is off at this point */ |
@@ -1054,7 +1049,7 @@ _STATIC(rtas_return_loc) | |||
1054 | 1049 | ||
1055 | bcl 20,31,$+4 | 1050 | bcl 20,31,$+4 |
1056 | 0: mflr r3 | 1051 | 0: mflr r3 |
1057 | ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */ | 1052 | ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */ |
1058 | 1053 | ||
1059 | mfmsr r6 | 1054 | mfmsr r6 |
1060 | li r0,MSR_RI | 1055 | li r0,MSR_RI |
@@ -1071,9 +1066,9 @@ _STATIC(rtas_return_loc) | |||
1071 | b . /* prevent speculative execution */ | 1066 | b . /* prevent speculative execution */ |
1072 | 1067 | ||
1073 | .align 3 | 1068 | .align 3 |
1074 | 1: .llong .rtas_restore_regs | 1069 | 1: .llong rtas_restore_regs |
1075 | 1070 | ||
1076 | _STATIC(rtas_restore_regs) | 1071 | rtas_restore_regs: |
1077 | /* relocation is on at this point */ | 1072 | /* relocation is on at this point */ |
1078 | REST_GPR(2, r1) /* Restore the TOC */ | 1073 | REST_GPR(2, r1) /* Restore the TOC */ |
1079 | REST_GPR(13, r1) /* Restore paca */ | 1074 | REST_GPR(13, r1) /* Restore paca */ |
@@ -1173,7 +1168,7 @@ _GLOBAL(mcount) | |||
1173 | _GLOBAL(_mcount) | 1168 | _GLOBAL(_mcount) |
1174 | blr | 1169 | blr |
1175 | 1170 | ||
1176 | _GLOBAL(ftrace_caller) | 1171 | _GLOBAL_TOC(ftrace_caller) |
1177 | /* Taken from output of objdump from lib64/glibc */ | 1172 | /* Taken from output of objdump from lib64/glibc */ |
1178 | mflr r3 | 1173 | mflr r3 |
1179 | ld r11, 0(r1) | 1174 | ld r11, 0(r1) |
@@ -1197,10 +1192,7 @@ _GLOBAL(ftrace_graph_stub) | |||
1197 | _GLOBAL(ftrace_stub) | 1192 | _GLOBAL(ftrace_stub) |
1198 | blr | 1193 | blr |
1199 | #else | 1194 | #else |
1200 | _GLOBAL(mcount) | 1195 | _GLOBAL_TOC(_mcount) |
1201 | blr | ||
1202 | |||
1203 | _GLOBAL(_mcount) | ||
1204 | /* Taken from output of objdump from lib64/glibc */ | 1196 | /* Taken from output of objdump from lib64/glibc */ |
1205 | mflr r3 | 1197 | mflr r3 |
1206 | ld r11, 0(r1) | 1198 | ld r11, 0(r1) |
@@ -1238,7 +1230,7 @@ _GLOBAL(ftrace_graph_caller) | |||
1238 | ld r11, 112(r1) | 1230 | ld r11, 112(r1) |
1239 | addi r3, r11, 16 | 1231 | addi r3, r11, 16 |
1240 | 1232 | ||
1241 | bl .prepare_ftrace_return | 1233 | bl prepare_ftrace_return |
1242 | nop | 1234 | nop |
1243 | 1235 | ||
1244 | ld r0, 128(r1) | 1236 | ld r0, 128(r1) |
@@ -1254,7 +1246,7 @@ _GLOBAL(return_to_handler) | |||
1254 | mr r31, r1 | 1246 | mr r31, r1 |
1255 | stdu r1, -112(r1) | 1247 | stdu r1, -112(r1) |
1256 | 1248 | ||
1257 | bl .ftrace_return_to_handler | 1249 | bl ftrace_return_to_handler |
1258 | nop | 1250 | nop |
1259 | 1251 | ||
1260 | /* return value has real return address */ | 1252 | /* return value has real return address */ |
@@ -1284,7 +1276,7 @@ _GLOBAL(mod_return_to_handler) | |||
1284 | */ | 1276 | */ |
1285 | ld r2, PACATOC(r13) | 1277 | ld r2, PACATOC(r13) |
1286 | 1278 | ||
1287 | bl .ftrace_return_to_handler | 1279 | bl ftrace_return_to_handler |
1288 | nop | 1280 | nop |
1289 | 1281 | ||
1290 | /* return value has real return address */ | 1282 | /* return value has real return address */ |
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index 60d1a2259dbe..59e4ba74975d 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c | |||
@@ -30,6 +30,7 @@ extern u32 epapr_ev_idle_start[]; | |||
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | bool epapr_paravirt_enabled; | 32 | bool epapr_paravirt_enabled; |
33 | static bool __maybe_unused epapr_has_idle; | ||
33 | 34 | ||
34 | static int __init early_init_dt_scan_epapr(unsigned long node, | 35 | static int __init early_init_dt_scan_epapr(unsigned long node, |
35 | const char *uname, | 36 | const char *uname, |
@@ -56,7 +57,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node, | |||
56 | 57 | ||
57 | #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) | 58 | #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) |
58 | if (of_get_flat_dt_prop(node, "has-idle", NULL)) | 59 | if (of_get_flat_dt_prop(node, "has-idle", NULL)) |
59 | ppc_md.power_save = epapr_ev_idle; | 60 | epapr_has_idle = true; |
60 | #endif | 61 | #endif |
61 | 62 | ||
62 | epapr_paravirt_enabled = true; | 63 | epapr_paravirt_enabled = true; |
@@ -71,3 +72,14 @@ int __init epapr_paravirt_early_init(void) | |||
71 | return 0; | 72 | return 0; |
72 | } | 73 | } |
73 | 74 | ||
75 | static int __init epapr_idle_init(void) | ||
76 | { | ||
77 | #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) | ||
78 | if (epapr_has_idle) | ||
79 | ppc_md.power_save = epapr_ev_idle; | ||
80 | #endif | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | postcore_initcall(epapr_idle_init); | ||
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index c1bee3ce9d1f..771b4e92e5d9 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S | |||
@@ -499,7 +499,7 @@ exc_##n##_bad_stack: \ | |||
499 | CHECK_NAPPING(); \ | 499 | CHECK_NAPPING(); \ |
500 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 500 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
501 | bl hdlr; \ | 501 | bl hdlr; \ |
502 | b .ret_from_except_lite; | 502 | b ret_from_except_lite; |
503 | 503 | ||
504 | /* This value is used to mark exception frames on the stack. */ | 504 | /* This value is used to mark exception frames on the stack. */ |
505 | .section ".toc","aw" | 505 | .section ".toc","aw" |
@@ -550,11 +550,11 @@ interrupt_end_book3e: | |||
550 | CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, | 550 | CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, |
551 | PROLOG_ADDITION_NONE) | 551 | PROLOG_ADDITION_NONE) |
552 | EXCEPTION_COMMON_CRIT(0x100) | 552 | EXCEPTION_COMMON_CRIT(0x100) |
553 | bl .save_nvgprs | 553 | bl save_nvgprs |
554 | bl special_reg_save | 554 | bl special_reg_save |
555 | CHECK_NAPPING(); | 555 | CHECK_NAPPING(); |
556 | addi r3,r1,STACK_FRAME_OVERHEAD | 556 | addi r3,r1,STACK_FRAME_OVERHEAD |
557 | bl .unknown_exception | 557 | bl unknown_exception |
558 | b ret_from_crit_except | 558 | b ret_from_crit_except |
559 | 559 | ||
560 | /* Machine Check Interrupt */ | 560 | /* Machine Check Interrupt */ |
@@ -562,11 +562,11 @@ interrupt_end_book3e: | |||
562 | MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, | 562 | MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, |
563 | PROLOG_ADDITION_NONE) | 563 | PROLOG_ADDITION_NONE) |
564 | EXCEPTION_COMMON_MC(0x000) | 564 | EXCEPTION_COMMON_MC(0x000) |
565 | bl .save_nvgprs | 565 | bl save_nvgprs |
566 | bl special_reg_save | 566 | bl special_reg_save |
567 | CHECK_NAPPING(); | 567 | CHECK_NAPPING(); |
568 | addi r3,r1,STACK_FRAME_OVERHEAD | 568 | addi r3,r1,STACK_FRAME_OVERHEAD |
569 | bl .machine_check_exception | 569 | bl machine_check_exception |
570 | b ret_from_mc_except | 570 | b ret_from_mc_except |
571 | 571 | ||
572 | /* Data Storage Interrupt */ | 572 | /* Data Storage Interrupt */ |
@@ -591,7 +591,7 @@ interrupt_end_book3e: | |||
591 | 591 | ||
592 | /* External Input Interrupt */ | 592 | /* External Input Interrupt */ |
593 | MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, | 593 | MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, |
594 | external_input, .do_IRQ, ACK_NONE) | 594 | external_input, do_IRQ, ACK_NONE) |
595 | 595 | ||
596 | /* Alignment */ | 596 | /* Alignment */ |
597 | START_EXCEPTION(alignment); | 597 | START_EXCEPTION(alignment); |
@@ -612,9 +612,9 @@ interrupt_end_book3e: | |||
612 | std r14,_DSISR(r1) | 612 | std r14,_DSISR(r1) |
613 | addi r3,r1,STACK_FRAME_OVERHEAD | 613 | addi r3,r1,STACK_FRAME_OVERHEAD |
614 | ld r14,PACA_EXGEN+EX_R14(r13) | 614 | ld r14,PACA_EXGEN+EX_R14(r13) |
615 | bl .save_nvgprs | 615 | bl save_nvgprs |
616 | bl .program_check_exception | 616 | bl program_check_exception |
617 | b .ret_from_except | 617 | b ret_from_except |
618 | 618 | ||
619 | /* Floating Point Unavailable Interrupt */ | 619 | /* Floating Point Unavailable Interrupt */ |
620 | START_EXCEPTION(fp_unavailable); | 620 | START_EXCEPTION(fp_unavailable); |
@@ -625,13 +625,13 @@ interrupt_end_book3e: | |||
625 | ld r12,_MSR(r1) | 625 | ld r12,_MSR(r1) |
626 | andi. r0,r12,MSR_PR; | 626 | andi. r0,r12,MSR_PR; |
627 | beq- 1f | 627 | beq- 1f |
628 | bl .load_up_fpu | 628 | bl load_up_fpu |
629 | b fast_exception_return | 629 | b fast_exception_return |
630 | 1: INTS_DISABLE | 630 | 1: INTS_DISABLE |
631 | bl .save_nvgprs | 631 | bl save_nvgprs |
632 | addi r3,r1,STACK_FRAME_OVERHEAD | 632 | addi r3,r1,STACK_FRAME_OVERHEAD |
633 | bl .kernel_fp_unavailable_exception | 633 | bl kernel_fp_unavailable_exception |
634 | b .ret_from_except | 634 | b ret_from_except |
635 | 635 | ||
636 | /* Altivec Unavailable Interrupt */ | 636 | /* Altivec Unavailable Interrupt */ |
637 | START_EXCEPTION(altivec_unavailable); | 637 | START_EXCEPTION(altivec_unavailable); |
@@ -644,16 +644,16 @@ BEGIN_FTR_SECTION | |||
644 | ld r12,_MSR(r1) | 644 | ld r12,_MSR(r1) |
645 | andi. r0,r12,MSR_PR; | 645 | andi. r0,r12,MSR_PR; |
646 | beq- 1f | 646 | beq- 1f |
647 | bl .load_up_altivec | 647 | bl load_up_altivec |
648 | b fast_exception_return | 648 | b fast_exception_return |
649 | 1: | 649 | 1: |
650 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 650 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
651 | #endif | 651 | #endif |
652 | INTS_DISABLE | 652 | INTS_DISABLE |
653 | bl .save_nvgprs | 653 | bl save_nvgprs |
654 | addi r3,r1,STACK_FRAME_OVERHEAD | 654 | addi r3,r1,STACK_FRAME_OVERHEAD |
655 | bl .altivec_unavailable_exception | 655 | bl altivec_unavailable_exception |
656 | b .ret_from_except | 656 | b ret_from_except |
657 | 657 | ||
658 | /* AltiVec Assist */ | 658 | /* AltiVec Assist */ |
659 | START_EXCEPTION(altivec_assist); | 659 | START_EXCEPTION(altivec_assist); |
@@ -662,39 +662,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
662 | PROLOG_ADDITION_NONE) | 662 | PROLOG_ADDITION_NONE) |
663 | EXCEPTION_COMMON(0x220) | 663 | EXCEPTION_COMMON(0x220) |
664 | INTS_DISABLE | 664 | INTS_DISABLE |
665 | bl .save_nvgprs | 665 | bl save_nvgprs |
666 | addi r3,r1,STACK_FRAME_OVERHEAD | 666 | addi r3,r1,STACK_FRAME_OVERHEAD |
667 | #ifdef CONFIG_ALTIVEC | 667 | #ifdef CONFIG_ALTIVEC |
668 | BEGIN_FTR_SECTION | 668 | BEGIN_FTR_SECTION |
669 | bl .altivec_assist_exception | 669 | bl altivec_assist_exception |
670 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 670 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
671 | #else | 671 | #else |
672 | bl .unknown_exception | 672 | bl unknown_exception |
673 | #endif | 673 | #endif |
674 | b .ret_from_except | 674 | b ret_from_except |
675 | 675 | ||
676 | 676 | ||
677 | /* Decrementer Interrupt */ | 677 | /* Decrementer Interrupt */ |
678 | MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, | 678 | MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, |
679 | decrementer, .timer_interrupt, ACK_DEC) | 679 | decrementer, timer_interrupt, ACK_DEC) |
680 | 680 | ||
681 | /* Fixed Interval Timer Interrupt */ | 681 | /* Fixed Interval Timer Interrupt */ |
682 | MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, | 682 | MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, |
683 | fixed_interval, .unknown_exception, ACK_FIT) | 683 | fixed_interval, unknown_exception, ACK_FIT) |
684 | 684 | ||
685 | /* Watchdog Timer Interrupt */ | 685 | /* Watchdog Timer Interrupt */ |
686 | START_EXCEPTION(watchdog); | 686 | START_EXCEPTION(watchdog); |
687 | CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, | 687 | CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, |
688 | PROLOG_ADDITION_NONE) | 688 | PROLOG_ADDITION_NONE) |
689 | EXCEPTION_COMMON_CRIT(0x9f0) | 689 | EXCEPTION_COMMON_CRIT(0x9f0) |
690 | bl .save_nvgprs | 690 | bl save_nvgprs |
691 | bl special_reg_save | 691 | bl special_reg_save |
692 | CHECK_NAPPING(); | 692 | CHECK_NAPPING(); |
693 | addi r3,r1,STACK_FRAME_OVERHEAD | 693 | addi r3,r1,STACK_FRAME_OVERHEAD |
694 | #ifdef CONFIG_BOOKE_WDT | 694 | #ifdef CONFIG_BOOKE_WDT |
695 | bl .WatchdogException | 695 | bl WatchdogException |
696 | #else | 696 | #else |
697 | bl .unknown_exception | 697 | bl unknown_exception |
698 | #endif | 698 | #endif |
699 | b ret_from_crit_except | 699 | b ret_from_crit_except |
700 | 700 | ||
@@ -712,10 +712,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
712 | PROLOG_ADDITION_NONE) | 712 | PROLOG_ADDITION_NONE) |
713 | EXCEPTION_COMMON(0xf20) | 713 | EXCEPTION_COMMON(0xf20) |
714 | INTS_DISABLE | 714 | INTS_DISABLE |
715 | bl .save_nvgprs | 715 | bl save_nvgprs |
716 | addi r3,r1,STACK_FRAME_OVERHEAD | 716 | addi r3,r1,STACK_FRAME_OVERHEAD |
717 | bl .unknown_exception | 717 | bl unknown_exception |
718 | b .ret_from_except | 718 | b ret_from_except |
719 | 719 | ||
720 | /* Debug exception as a critical interrupt*/ | 720 | /* Debug exception as a critical interrupt*/ |
721 | START_EXCEPTION(debug_crit); | 721 | START_EXCEPTION(debug_crit); |
@@ -774,9 +774,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
774 | mr r4,r14 | 774 | mr r4,r14 |
775 | ld r14,PACA_EXCRIT+EX_R14(r13) | 775 | ld r14,PACA_EXCRIT+EX_R14(r13) |
776 | ld r15,PACA_EXCRIT+EX_R15(r13) | 776 | ld r15,PACA_EXCRIT+EX_R15(r13) |
777 | bl .save_nvgprs | 777 | bl save_nvgprs |
778 | bl .DebugException | 778 | bl DebugException |
779 | b .ret_from_except | 779 | b ret_from_except |
780 | 780 | ||
781 | kernel_dbg_exc: | 781 | kernel_dbg_exc: |
782 | b . /* NYI */ | 782 | b . /* NYI */ |
@@ -839,9 +839,9 @@ kernel_dbg_exc: | |||
839 | mr r4,r14 | 839 | mr r4,r14 |
840 | ld r14,PACA_EXDBG+EX_R14(r13) | 840 | ld r14,PACA_EXDBG+EX_R14(r13) |
841 | ld r15,PACA_EXDBG+EX_R15(r13) | 841 | ld r15,PACA_EXDBG+EX_R15(r13) |
842 | bl .save_nvgprs | 842 | bl save_nvgprs |
843 | bl .DebugException | 843 | bl DebugException |
844 | b .ret_from_except | 844 | b ret_from_except |
845 | 845 | ||
846 | START_EXCEPTION(perfmon); | 846 | START_EXCEPTION(perfmon); |
847 | NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, | 847 | NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, |
@@ -850,23 +850,23 @@ kernel_dbg_exc: | |||
850 | INTS_DISABLE | 850 | INTS_DISABLE |
851 | CHECK_NAPPING() | 851 | CHECK_NAPPING() |
852 | addi r3,r1,STACK_FRAME_OVERHEAD | 852 | addi r3,r1,STACK_FRAME_OVERHEAD |
853 | bl .performance_monitor_exception | 853 | bl performance_monitor_exception |
854 | b .ret_from_except_lite | 854 | b ret_from_except_lite |
855 | 855 | ||
856 | /* Doorbell interrupt */ | 856 | /* Doorbell interrupt */ |
857 | MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, | 857 | MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, |
858 | doorbell, .doorbell_exception, ACK_NONE) | 858 | doorbell, doorbell_exception, ACK_NONE) |
859 | 859 | ||
860 | /* Doorbell critical Interrupt */ | 860 | /* Doorbell critical Interrupt */ |
861 | START_EXCEPTION(doorbell_crit); | 861 | START_EXCEPTION(doorbell_crit); |
862 | CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, | 862 | CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, |
863 | PROLOG_ADDITION_NONE) | 863 | PROLOG_ADDITION_NONE) |
864 | EXCEPTION_COMMON_CRIT(0x2a0) | 864 | EXCEPTION_COMMON_CRIT(0x2a0) |
865 | bl .save_nvgprs | 865 | bl save_nvgprs |
866 | bl special_reg_save | 866 | bl special_reg_save |
867 | CHECK_NAPPING(); | 867 | CHECK_NAPPING(); |
868 | addi r3,r1,STACK_FRAME_OVERHEAD | 868 | addi r3,r1,STACK_FRAME_OVERHEAD |
869 | bl .unknown_exception | 869 | bl unknown_exception |
870 | b ret_from_crit_except | 870 | b ret_from_crit_except |
871 | 871 | ||
872 | /* | 872 | /* |
@@ -878,21 +878,21 @@ kernel_dbg_exc: | |||
878 | PROLOG_ADDITION_NONE) | 878 | PROLOG_ADDITION_NONE) |
879 | EXCEPTION_COMMON(0x2c0) | 879 | EXCEPTION_COMMON(0x2c0) |
880 | addi r3,r1,STACK_FRAME_OVERHEAD | 880 | addi r3,r1,STACK_FRAME_OVERHEAD |
881 | bl .save_nvgprs | 881 | bl save_nvgprs |
882 | INTS_RESTORE_HARD | 882 | INTS_RESTORE_HARD |
883 | bl .unknown_exception | 883 | bl unknown_exception |
884 | b .ret_from_except | 884 | b ret_from_except |
885 | 885 | ||
886 | /* Guest Doorbell critical Interrupt */ | 886 | /* Guest Doorbell critical Interrupt */ |
887 | START_EXCEPTION(guest_doorbell_crit); | 887 | START_EXCEPTION(guest_doorbell_crit); |
888 | CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, | 888 | CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, |
889 | PROLOG_ADDITION_NONE) | 889 | PROLOG_ADDITION_NONE) |
890 | EXCEPTION_COMMON_CRIT(0x2e0) | 890 | EXCEPTION_COMMON_CRIT(0x2e0) |
891 | bl .save_nvgprs | 891 | bl save_nvgprs |
892 | bl special_reg_save | 892 | bl special_reg_save |
893 | CHECK_NAPPING(); | 893 | CHECK_NAPPING(); |
894 | addi r3,r1,STACK_FRAME_OVERHEAD | 894 | addi r3,r1,STACK_FRAME_OVERHEAD |
895 | bl .unknown_exception | 895 | bl unknown_exception |
896 | b ret_from_crit_except | 896 | b ret_from_crit_except |
897 | 897 | ||
898 | /* Hypervisor call */ | 898 | /* Hypervisor call */ |
@@ -901,10 +901,10 @@ kernel_dbg_exc: | |||
901 | PROLOG_ADDITION_NONE) | 901 | PROLOG_ADDITION_NONE) |
902 | EXCEPTION_COMMON(0x310) | 902 | EXCEPTION_COMMON(0x310) |
903 | addi r3,r1,STACK_FRAME_OVERHEAD | 903 | addi r3,r1,STACK_FRAME_OVERHEAD |
904 | bl .save_nvgprs | 904 | bl save_nvgprs |
905 | INTS_RESTORE_HARD | 905 | INTS_RESTORE_HARD |
906 | bl .unknown_exception | 906 | bl unknown_exception |
907 | b .ret_from_except | 907 | b ret_from_except |
908 | 908 | ||
909 | /* Embedded Hypervisor priviledged */ | 909 | /* Embedded Hypervisor priviledged */ |
910 | START_EXCEPTION(ehpriv); | 910 | START_EXCEPTION(ehpriv); |
@@ -912,10 +912,10 @@ kernel_dbg_exc: | |||
912 | PROLOG_ADDITION_NONE) | 912 | PROLOG_ADDITION_NONE) |
913 | EXCEPTION_COMMON(0x320) | 913 | EXCEPTION_COMMON(0x320) |
914 | addi r3,r1,STACK_FRAME_OVERHEAD | 914 | addi r3,r1,STACK_FRAME_OVERHEAD |
915 | bl .save_nvgprs | 915 | bl save_nvgprs |
916 | INTS_RESTORE_HARD | 916 | INTS_RESTORE_HARD |
917 | bl .unknown_exception | 917 | bl unknown_exception |
918 | b .ret_from_except | 918 | b ret_from_except |
919 | 919 | ||
920 | /* LRAT Error interrupt */ | 920 | /* LRAT Error interrupt */ |
921 | START_EXCEPTION(lrat_error); | 921 | START_EXCEPTION(lrat_error); |
@@ -1014,16 +1014,16 @@ storage_fault_common: | |||
1014 | mr r5,r15 | 1014 | mr r5,r15 |
1015 | ld r14,PACA_EXGEN+EX_R14(r13) | 1015 | ld r14,PACA_EXGEN+EX_R14(r13) |
1016 | ld r15,PACA_EXGEN+EX_R15(r13) | 1016 | ld r15,PACA_EXGEN+EX_R15(r13) |
1017 | bl .do_page_fault | 1017 | bl do_page_fault |
1018 | cmpdi r3,0 | 1018 | cmpdi r3,0 |
1019 | bne- 1f | 1019 | bne- 1f |
1020 | b .ret_from_except_lite | 1020 | b ret_from_except_lite |
1021 | 1: bl .save_nvgprs | 1021 | 1: bl save_nvgprs |
1022 | mr r5,r3 | 1022 | mr r5,r3 |
1023 | addi r3,r1,STACK_FRAME_OVERHEAD | 1023 | addi r3,r1,STACK_FRAME_OVERHEAD |
1024 | ld r4,_DAR(r1) | 1024 | ld r4,_DAR(r1) |
1025 | bl .bad_page_fault | 1025 | bl bad_page_fault |
1026 | b .ret_from_except | 1026 | b ret_from_except |
1027 | 1027 | ||
1028 | /* | 1028 | /* |
1029 | * Alignment exception doesn't fit entirely in the 0x100 bytes so it | 1029 | * Alignment exception doesn't fit entirely in the 0x100 bytes so it |
@@ -1035,10 +1035,10 @@ alignment_more: | |||
1035 | addi r3,r1,STACK_FRAME_OVERHEAD | 1035 | addi r3,r1,STACK_FRAME_OVERHEAD |
1036 | ld r14,PACA_EXGEN+EX_R14(r13) | 1036 | ld r14,PACA_EXGEN+EX_R14(r13) |
1037 | ld r15,PACA_EXGEN+EX_R15(r13) | 1037 | ld r15,PACA_EXGEN+EX_R15(r13) |
1038 | bl .save_nvgprs | 1038 | bl save_nvgprs |
1039 | INTS_RESTORE_HARD | 1039 | INTS_RESTORE_HARD |
1040 | bl .alignment_exception | 1040 | bl alignment_exception |
1041 | b .ret_from_except | 1041 | b ret_from_except |
1042 | 1042 | ||
1043 | /* | 1043 | /* |
1044 | * We branch here from entry_64.S for the last stage of the exception | 1044 | * We branch here from entry_64.S for the last stage of the exception |
@@ -1172,7 +1172,7 @@ bad_stack_book3e: | |||
1172 | std r12,0(r11) | 1172 | std r12,0(r11) |
1173 | ld r2,PACATOC(r13) | 1173 | ld r2,PACATOC(r13) |
1174 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 1174 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
1175 | bl .kernel_bad_stack | 1175 | bl kernel_bad_stack |
1176 | b 1b | 1176 | b 1b |
1177 | 1177 | ||
1178 | /* | 1178 | /* |
@@ -1521,13 +1521,13 @@ _GLOBAL(start_initialization_book3e) | |||
1521 | * and always use AS 0, so we just set it up to match our link | 1521 | * and always use AS 0, so we just set it up to match our link |
1522 | * address and never use 0 based addresses. | 1522 | * address and never use 0 based addresses. |
1523 | */ | 1523 | */ |
1524 | bl .initial_tlb_book3e | 1524 | bl initial_tlb_book3e |
1525 | 1525 | ||
1526 | /* Init global core bits */ | 1526 | /* Init global core bits */ |
1527 | bl .init_core_book3e | 1527 | bl init_core_book3e |
1528 | 1528 | ||
1529 | /* Init per-thread bits */ | 1529 | /* Init per-thread bits */ |
1530 | bl .init_thread_book3e | 1530 | bl init_thread_book3e |
1531 | 1531 | ||
1532 | /* Return to common init code */ | 1532 | /* Return to common init code */ |
1533 | tovirt(r28,r28) | 1533 | tovirt(r28,r28) |
@@ -1548,7 +1548,7 @@ _GLOBAL(start_initialization_book3e) | |||
1548 | */ | 1548 | */ |
1549 | _GLOBAL(book3e_secondary_core_init_tlb_set) | 1549 | _GLOBAL(book3e_secondary_core_init_tlb_set) |
1550 | li r4,1 | 1550 | li r4,1 |
1551 | b .generic_secondary_smp_init | 1551 | b generic_secondary_smp_init |
1552 | 1552 | ||
1553 | _GLOBAL(book3e_secondary_core_init) | 1553 | _GLOBAL(book3e_secondary_core_init) |
1554 | mflr r28 | 1554 | mflr r28 |
@@ -1558,18 +1558,18 @@ _GLOBAL(book3e_secondary_core_init) | |||
1558 | bne 2f | 1558 | bne 2f |
1559 | 1559 | ||
1560 | /* Setup TLB for this core */ | 1560 | /* Setup TLB for this core */ |
1561 | bl .initial_tlb_book3e | 1561 | bl initial_tlb_book3e |
1562 | 1562 | ||
1563 | /* We can return from the above running at a different | 1563 | /* We can return from the above running at a different |
1564 | * address, so recalculate r2 (TOC) | 1564 | * address, so recalculate r2 (TOC) |
1565 | */ | 1565 | */ |
1566 | bl .relative_toc | 1566 | bl relative_toc |
1567 | 1567 | ||
1568 | /* Init global core bits */ | 1568 | /* Init global core bits */ |
1569 | 2: bl .init_core_book3e | 1569 | 2: bl init_core_book3e |
1570 | 1570 | ||
1571 | /* Init per-thread bits */ | 1571 | /* Init per-thread bits */ |
1572 | 3: bl .init_thread_book3e | 1572 | 3: bl init_thread_book3e |
1573 | 1573 | ||
1574 | /* Return to common init code at proper virtual address. | 1574 | /* Return to common init code at proper virtual address. |
1575 | * | 1575 | * |
@@ -1596,14 +1596,14 @@ _GLOBAL(book3e_secondary_thread_init) | |||
1596 | mflr r28 | 1596 | mflr r28 |
1597 | b 3b | 1597 | b 3b |
1598 | 1598 | ||
1599 | _STATIC(init_core_book3e) | 1599 | init_core_book3e: |
1600 | /* Establish the interrupt vector base */ | 1600 | /* Establish the interrupt vector base */ |
1601 | LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) | 1601 | LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) |
1602 | mtspr SPRN_IVPR,r3 | 1602 | mtspr SPRN_IVPR,r3 |
1603 | sync | 1603 | sync |
1604 | blr | 1604 | blr |
1605 | 1605 | ||
1606 | _STATIC(init_thread_book3e) | 1606 | init_thread_book3e: |
1607 | lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h | 1607 | lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h |
1608 | mtspr SPRN_EPCR,r3 | 1608 | mtspr SPRN_EPCR,r3 |
1609 | 1609 | ||
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3afd3915921a..20f11eb4dff7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -132,12 +132,12 @@ BEGIN_FTR_SECTION | |||
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | beq cr1,2f | 134 | beq cr1,2f |
135 | b .power7_wakeup_noloss | 135 | b power7_wakeup_noloss |
136 | 2: b .power7_wakeup_loss | 136 | 2: b power7_wakeup_loss |
137 | 137 | ||
138 | /* Fast Sleep wakeup on PowerNV */ | 138 | /* Fast Sleep wakeup on PowerNV */ |
139 | 8: GET_PACA(r13) | 139 | 8: GET_PACA(r13) |
140 | b .power7_wakeup_tb_loss | 140 | b power7_wakeup_tb_loss |
141 | 141 | ||
142 | 9: | 142 | 9: |
143 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) | 143 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) |
@@ -211,16 +211,16 @@ data_access_slb_pSeries: | |||
211 | #endif /* __DISABLED__ */ | 211 | #endif /* __DISABLED__ */ |
212 | mfspr r12,SPRN_SRR1 | 212 | mfspr r12,SPRN_SRR1 |
213 | #ifndef CONFIG_RELOCATABLE | 213 | #ifndef CONFIG_RELOCATABLE |
214 | b .slb_miss_realmode | 214 | b slb_miss_realmode |
215 | #else | 215 | #else |
216 | /* | 216 | /* |
217 | * We can't just use a direct branch to .slb_miss_realmode | 217 | * We can't just use a direct branch to slb_miss_realmode |
218 | * because the distance from here to there depends on where | 218 | * because the distance from here to there depends on where |
219 | * the kernel ends up being put. | 219 | * the kernel ends up being put. |
220 | */ | 220 | */ |
221 | mfctr r11 | 221 | mfctr r11 |
222 | ld r10,PACAKBASE(r13) | 222 | ld r10,PACAKBASE(r13) |
223 | LOAD_HANDLER(r10, .slb_miss_realmode) | 223 | LOAD_HANDLER(r10, slb_miss_realmode) |
224 | mtctr r10 | 224 | mtctr r10 |
225 | bctr | 225 | bctr |
226 | #endif | 226 | #endif |
@@ -243,11 +243,11 @@ instruction_access_slb_pSeries: | |||
243 | #endif /* __DISABLED__ */ | 243 | #endif /* __DISABLED__ */ |
244 | mfspr r12,SPRN_SRR1 | 244 | mfspr r12,SPRN_SRR1 |
245 | #ifndef CONFIG_RELOCATABLE | 245 | #ifndef CONFIG_RELOCATABLE |
246 | b .slb_miss_realmode | 246 | b slb_miss_realmode |
247 | #else | 247 | #else |
248 | mfctr r11 | 248 | mfctr r11 |
249 | ld r10,PACAKBASE(r13) | 249 | ld r10,PACAKBASE(r13) |
250 | LOAD_HANDLER(r10, .slb_miss_realmode) | 250 | LOAD_HANDLER(r10, slb_miss_realmode) |
251 | mtctr r10 | 251 | mtctr r10 |
252 | bctr | 252 | bctr |
253 | #endif | 253 | #endif |
@@ -524,7 +524,7 @@ do_stab_bolted_pSeries: | |||
524 | std r12,PACA_EXSLB+EX_R12(r13) | 524 | std r12,PACA_EXSLB+EX_R12(r13) |
525 | GET_SCRATCH0(r10) | 525 | GET_SCRATCH0(r10) |
526 | std r10,PACA_EXSLB+EX_R13(r13) | 526 | std r10,PACA_EXSLB+EX_R13(r13) |
527 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) | 527 | EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD) |
528 | 528 | ||
529 | KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) | 529 | KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) |
530 | KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) | 530 | KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) |
@@ -769,38 +769,38 @@ kvmppc_skip_Hinterrupt: | |||
769 | 769 | ||
770 | /*** Common interrupt handlers ***/ | 770 | /*** Common interrupt handlers ***/ |
771 | 771 | ||
772 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | 772 | STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception) |
773 | 773 | ||
774 | STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) | 774 | STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) |
775 | STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) | 775 | STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt) |
776 | STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) | 776 | STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt) |
777 | #ifdef CONFIG_PPC_DOORBELL | 777 | #ifdef CONFIG_PPC_DOORBELL |
778 | STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception) | 778 | STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception) |
779 | #else | 779 | #else |
780 | STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception) | 780 | STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception) |
781 | #endif | 781 | #endif |
782 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | 782 | STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception) |
783 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | 783 | STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception) |
784 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | 784 | STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception) |
785 | STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) | 785 | STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt) |
786 | STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) | 786 | STD_EXCEPTION_COMMON(0xe60, hmi_exception, unknown_exception) |
787 | #ifdef CONFIG_PPC_DOORBELL | 787 | #ifdef CONFIG_PPC_DOORBELL |
788 | STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) | 788 | STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception) |
789 | #else | 789 | #else |
790 | STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception) | 790 | STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception) |
791 | #endif | 791 | #endif |
792 | STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) | 792 | STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception) |
793 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | 793 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception) |
794 | STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception) | 794 | STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception) |
795 | #ifdef CONFIG_ALTIVEC | 795 | #ifdef CONFIG_ALTIVEC |
796 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | 796 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception) |
797 | #else | 797 | #else |
798 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | 798 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception) |
799 | #endif | 799 | #endif |
800 | #ifdef CONFIG_CBE_RAS | 800 | #ifdef CONFIG_CBE_RAS |
801 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) | 801 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception) |
802 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) | 802 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception) |
803 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) | 803 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception) |
804 | #endif /* CONFIG_CBE_RAS */ | 804 | #endif /* CONFIG_CBE_RAS */ |
805 | 805 | ||
806 | /* | 806 | /* |
@@ -829,16 +829,16 @@ data_access_slb_relon_pSeries: | |||
829 | mfspr r3,SPRN_DAR | 829 | mfspr r3,SPRN_DAR |
830 | mfspr r12,SPRN_SRR1 | 830 | mfspr r12,SPRN_SRR1 |
831 | #ifndef CONFIG_RELOCATABLE | 831 | #ifndef CONFIG_RELOCATABLE |
832 | b .slb_miss_realmode | 832 | b slb_miss_realmode |
833 | #else | 833 | #else |
834 | /* | 834 | /* |
835 | * We can't just use a direct branch to .slb_miss_realmode | 835 | * We can't just use a direct branch to slb_miss_realmode |
836 | * because the distance from here to there depends on where | 836 | * because the distance from here to there depends on where |
837 | * the kernel ends up being put. | 837 | * the kernel ends up being put. |
838 | */ | 838 | */ |
839 | mfctr r11 | 839 | mfctr r11 |
840 | ld r10,PACAKBASE(r13) | 840 | ld r10,PACAKBASE(r13) |
841 | LOAD_HANDLER(r10, .slb_miss_realmode) | 841 | LOAD_HANDLER(r10, slb_miss_realmode) |
842 | mtctr r10 | 842 | mtctr r10 |
843 | bctr | 843 | bctr |
844 | #endif | 844 | #endif |
@@ -854,11 +854,11 @@ instruction_access_slb_relon_pSeries: | |||
854 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | 854 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ |
855 | mfspr r12,SPRN_SRR1 | 855 | mfspr r12,SPRN_SRR1 |
856 | #ifndef CONFIG_RELOCATABLE | 856 | #ifndef CONFIG_RELOCATABLE |
857 | b .slb_miss_realmode | 857 | b slb_miss_realmode |
858 | #else | 858 | #else |
859 | mfctr r11 | 859 | mfctr r11 |
860 | ld r10,PACAKBASE(r13) | 860 | ld r10,PACAKBASE(r13) |
861 | LOAD_HANDLER(r10, .slb_miss_realmode) | 861 | LOAD_HANDLER(r10, slb_miss_realmode) |
862 | mtctr r10 | 862 | mtctr r10 |
863 | bctr | 863 | bctr |
864 | #endif | 864 | #endif |
@@ -966,7 +966,7 @@ system_call_entry: | |||
966 | b system_call_common | 966 | b system_call_common |
967 | 967 | ||
968 | ppc64_runlatch_on_trampoline: | 968 | ppc64_runlatch_on_trampoline: |
969 | b .__ppc64_runlatch_on | 969 | b __ppc64_runlatch_on |
970 | 970 | ||
971 | /* | 971 | /* |
972 | * Here we have detected that the kernel stack pointer is bad. | 972 | * Here we have detected that the kernel stack pointer is bad. |
@@ -1025,7 +1025,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | |||
1025 | std r12,RESULT(r1) | 1025 | std r12,RESULT(r1) |
1026 | std r11,STACK_FRAME_OVERHEAD-16(r1) | 1026 | std r11,STACK_FRAME_OVERHEAD-16(r1) |
1027 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 1027 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
1028 | bl .kernel_bad_stack | 1028 | bl kernel_bad_stack |
1029 | b 1b | 1029 | b 1b |
1030 | 1030 | ||
1031 | /* | 1031 | /* |
@@ -1046,7 +1046,7 @@ data_access_common: | |||
1046 | ld r3,PACA_EXGEN+EX_DAR(r13) | 1046 | ld r3,PACA_EXGEN+EX_DAR(r13) |
1047 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | 1047 | lwz r4,PACA_EXGEN+EX_DSISR(r13) |
1048 | li r5,0x300 | 1048 | li r5,0x300 |
1049 | b .do_hash_page /* Try to handle as hpte fault */ | 1049 | b do_hash_page /* Try to handle as hpte fault */ |
1050 | 1050 | ||
1051 | .align 7 | 1051 | .align 7 |
1052 | .globl h_data_storage_common | 1052 | .globl h_data_storage_common |
@@ -1056,11 +1056,11 @@ h_data_storage_common: | |||
1056 | mfspr r10,SPRN_HDSISR | 1056 | mfspr r10,SPRN_HDSISR |
1057 | stw r10,PACA_EXGEN+EX_DSISR(r13) | 1057 | stw r10,PACA_EXGEN+EX_DSISR(r13) |
1058 | EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) | 1058 | EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) |
1059 | bl .save_nvgprs | 1059 | bl save_nvgprs |
1060 | DISABLE_INTS | 1060 | DISABLE_INTS |
1061 | addi r3,r1,STACK_FRAME_OVERHEAD | 1061 | addi r3,r1,STACK_FRAME_OVERHEAD |
1062 | bl .unknown_exception | 1062 | bl unknown_exception |
1063 | b .ret_from_except | 1063 | b ret_from_except |
1064 | 1064 | ||
1065 | .align 7 | 1065 | .align 7 |
1066 | .globl instruction_access_common | 1066 | .globl instruction_access_common |
@@ -1071,9 +1071,9 @@ instruction_access_common: | |||
1071 | ld r3,_NIP(r1) | 1071 | ld r3,_NIP(r1) |
1072 | andis. r4,r12,0x5820 | 1072 | andis. r4,r12,0x5820 |
1073 | li r5,0x400 | 1073 | li r5,0x400 |
1074 | b .do_hash_page /* Try to handle as hpte fault */ | 1074 | b do_hash_page /* Try to handle as hpte fault */ |
1075 | 1075 | ||
1076 | STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) | 1076 | STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception) |
1077 | 1077 | ||
1078 | /* | 1078 | /* |
1079 | * Here is the common SLB miss user that is used when going to virtual | 1079 | * Here is the common SLB miss user that is used when going to virtual |
@@ -1088,7 +1088,7 @@ slb_miss_user_common: | |||
1088 | stw r9,PACA_EXGEN+EX_CCR(r13) | 1088 | stw r9,PACA_EXGEN+EX_CCR(r13) |
1089 | std r10,PACA_EXGEN+EX_LR(r13) | 1089 | std r10,PACA_EXGEN+EX_LR(r13) |
1090 | std r11,PACA_EXGEN+EX_SRR0(r13) | 1090 | std r11,PACA_EXGEN+EX_SRR0(r13) |
1091 | bl .slb_allocate_user | 1091 | bl slb_allocate_user |
1092 | 1092 | ||
1093 | ld r10,PACA_EXGEN+EX_LR(r13) | 1093 | ld r10,PACA_EXGEN+EX_LR(r13) |
1094 | ld r3,PACA_EXGEN+EX_R3(r13) | 1094 | ld r3,PACA_EXGEN+EX_R3(r13) |
@@ -1131,9 +1131,9 @@ slb_miss_fault: | |||
1131 | unrecov_user_slb: | 1131 | unrecov_user_slb: |
1132 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) | 1132 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) |
1133 | DISABLE_INTS | 1133 | DISABLE_INTS |
1134 | bl .save_nvgprs | 1134 | bl save_nvgprs |
1135 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 1135 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
1136 | bl .unrecoverable_exception | 1136 | bl unrecoverable_exception |
1137 | b 1b | 1137 | b 1b |
1138 | 1138 | ||
1139 | #endif /* __DISABLED__ */ | 1139 | #endif /* __DISABLED__ */ |
@@ -1158,10 +1158,10 @@ machine_check_common: | |||
1158 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | 1158 | lwz r4,PACA_EXGEN+EX_DSISR(r13) |
1159 | std r3,_DAR(r1) | 1159 | std r3,_DAR(r1) |
1160 | std r4,_DSISR(r1) | 1160 | std r4,_DSISR(r1) |
1161 | bl .save_nvgprs | 1161 | bl save_nvgprs |
1162 | addi r3,r1,STACK_FRAME_OVERHEAD | 1162 | addi r3,r1,STACK_FRAME_OVERHEAD |
1163 | bl .machine_check_exception | 1163 | bl machine_check_exception |
1164 | b .ret_from_except | 1164 | b ret_from_except |
1165 | 1165 | ||
1166 | .align 7 | 1166 | .align 7 |
1167 | .globl alignment_common | 1167 | .globl alignment_common |
@@ -1175,31 +1175,31 @@ alignment_common: | |||
1175 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | 1175 | lwz r4,PACA_EXGEN+EX_DSISR(r13) |
1176 | std r3,_DAR(r1) | 1176 | std r3,_DAR(r1) |
1177 | std r4,_DSISR(r1) | 1177 | std r4,_DSISR(r1) |
1178 | bl .save_nvgprs | 1178 | bl save_nvgprs |
1179 | DISABLE_INTS | 1179 | DISABLE_INTS |
1180 | addi r3,r1,STACK_FRAME_OVERHEAD | 1180 | addi r3,r1,STACK_FRAME_OVERHEAD |
1181 | bl .alignment_exception | 1181 | bl alignment_exception |
1182 | b .ret_from_except | 1182 | b ret_from_except |
1183 | 1183 | ||
1184 | .align 7 | 1184 | .align 7 |
1185 | .globl program_check_common | 1185 | .globl program_check_common |
1186 | program_check_common: | 1186 | program_check_common: |
1187 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | 1187 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) |
1188 | bl .save_nvgprs | 1188 | bl save_nvgprs |
1189 | DISABLE_INTS | 1189 | DISABLE_INTS |
1190 | addi r3,r1,STACK_FRAME_OVERHEAD | 1190 | addi r3,r1,STACK_FRAME_OVERHEAD |
1191 | bl .program_check_exception | 1191 | bl program_check_exception |
1192 | b .ret_from_except | 1192 | b ret_from_except |
1193 | 1193 | ||
1194 | .align 7 | 1194 | .align 7 |
1195 | .globl fp_unavailable_common | 1195 | .globl fp_unavailable_common |
1196 | fp_unavailable_common: | 1196 | fp_unavailable_common: |
1197 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | 1197 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) |
1198 | bne 1f /* if from user, just load it up */ | 1198 | bne 1f /* if from user, just load it up */ |
1199 | bl .save_nvgprs | 1199 | bl save_nvgprs |
1200 | DISABLE_INTS | 1200 | DISABLE_INTS |
1201 | addi r3,r1,STACK_FRAME_OVERHEAD | 1201 | addi r3,r1,STACK_FRAME_OVERHEAD |
1202 | bl .kernel_fp_unavailable_exception | 1202 | bl kernel_fp_unavailable_exception |
1203 | BUG_OPCODE | 1203 | BUG_OPCODE |
1204 | 1: | 1204 | 1: |
1205 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1205 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
@@ -1211,15 +1211,15 @@ BEGIN_FTR_SECTION | |||
1211 | bne- 2f | 1211 | bne- 2f |
1212 | END_FTR_SECTION_IFSET(CPU_FTR_TM) | 1212 | END_FTR_SECTION_IFSET(CPU_FTR_TM) |
1213 | #endif | 1213 | #endif |
1214 | bl .load_up_fpu | 1214 | bl load_up_fpu |
1215 | b fast_exception_return | 1215 | b fast_exception_return |
1216 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1216 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1217 | 2: /* User process was in a transaction */ | 1217 | 2: /* User process was in a transaction */ |
1218 | bl .save_nvgprs | 1218 | bl save_nvgprs |
1219 | DISABLE_INTS | 1219 | DISABLE_INTS |
1220 | addi r3,r1,STACK_FRAME_OVERHEAD | 1220 | addi r3,r1,STACK_FRAME_OVERHEAD |
1221 | bl .fp_unavailable_tm | 1221 | bl fp_unavailable_tm |
1222 | b .ret_from_except | 1222 | b ret_from_except |
1223 | #endif | 1223 | #endif |
1224 | .align 7 | 1224 | .align 7 |
1225 | .globl altivec_unavailable_common | 1225 | .globl altivec_unavailable_common |
@@ -1237,24 +1237,24 @@ BEGIN_FTR_SECTION | |||
1237 | bne- 2f | 1237 | bne- 2f |
1238 | END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) | 1238 | END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) |
1239 | #endif | 1239 | #endif |
1240 | bl .load_up_altivec | 1240 | bl load_up_altivec |
1241 | b fast_exception_return | 1241 | b fast_exception_return |
1242 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1242 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1243 | 2: /* User process was in a transaction */ | 1243 | 2: /* User process was in a transaction */ |
1244 | bl .save_nvgprs | 1244 | bl save_nvgprs |
1245 | DISABLE_INTS | 1245 | DISABLE_INTS |
1246 | addi r3,r1,STACK_FRAME_OVERHEAD | 1246 | addi r3,r1,STACK_FRAME_OVERHEAD |
1247 | bl .altivec_unavailable_tm | 1247 | bl altivec_unavailable_tm |
1248 | b .ret_from_except | 1248 | b ret_from_except |
1249 | #endif | 1249 | #endif |
1250 | 1: | 1250 | 1: |
1251 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 1251 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
1252 | #endif | 1252 | #endif |
1253 | bl .save_nvgprs | 1253 | bl save_nvgprs |
1254 | DISABLE_INTS | 1254 | DISABLE_INTS |
1255 | addi r3,r1,STACK_FRAME_OVERHEAD | 1255 | addi r3,r1,STACK_FRAME_OVERHEAD |
1256 | bl .altivec_unavailable_exception | 1256 | bl altivec_unavailable_exception |
1257 | b .ret_from_except | 1257 | b ret_from_except |
1258 | 1258 | ||
1259 | .align 7 | 1259 | .align 7 |
1260 | .globl vsx_unavailable_common | 1260 | .globl vsx_unavailable_common |
@@ -1272,26 +1272,26 @@ BEGIN_FTR_SECTION | |||
1272 | bne- 2f | 1272 | bne- 2f |
1273 | END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) | 1273 | END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) |
1274 | #endif | 1274 | #endif |
1275 | b .load_up_vsx | 1275 | b load_up_vsx |
1276 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1276 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1277 | 2: /* User process was in a transaction */ | 1277 | 2: /* User process was in a transaction */ |
1278 | bl .save_nvgprs | 1278 | bl save_nvgprs |
1279 | DISABLE_INTS | 1279 | DISABLE_INTS |
1280 | addi r3,r1,STACK_FRAME_OVERHEAD | 1280 | addi r3,r1,STACK_FRAME_OVERHEAD |
1281 | bl .vsx_unavailable_tm | 1281 | bl vsx_unavailable_tm |
1282 | b .ret_from_except | 1282 | b ret_from_except |
1283 | #endif | 1283 | #endif |
1284 | 1: | 1284 | 1: |
1285 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | 1285 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) |
1286 | #endif | 1286 | #endif |
1287 | bl .save_nvgprs | 1287 | bl save_nvgprs |
1288 | DISABLE_INTS | 1288 | DISABLE_INTS |
1289 | addi r3,r1,STACK_FRAME_OVERHEAD | 1289 | addi r3,r1,STACK_FRAME_OVERHEAD |
1290 | bl .vsx_unavailable_exception | 1290 | bl vsx_unavailable_exception |
1291 | b .ret_from_except | 1291 | b ret_from_except |
1292 | 1292 | ||
1293 | STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) | 1293 | STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception) |
1294 | STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) | 1294 | STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception) |
1295 | 1295 | ||
1296 | .align 7 | 1296 | .align 7 |
1297 | .globl __end_handlers | 1297 | .globl __end_handlers |
@@ -1386,9 +1386,9 @@ _GLOBAL(opal_mc_secondary_handler) | |||
1386 | machine_check_handle_early: | 1386 | machine_check_handle_early: |
1387 | std r0,GPR0(r1) /* Save r0 */ | 1387 | std r0,GPR0(r1) /* Save r0 */ |
1388 | EXCEPTION_PROLOG_COMMON_3(0x200) | 1388 | EXCEPTION_PROLOG_COMMON_3(0x200) |
1389 | bl .save_nvgprs | 1389 | bl save_nvgprs |
1390 | addi r3,r1,STACK_FRAME_OVERHEAD | 1390 | addi r3,r1,STACK_FRAME_OVERHEAD |
1391 | bl .machine_check_early | 1391 | bl machine_check_early |
1392 | ld r12,_MSR(r1) | 1392 | ld r12,_MSR(r1) |
1393 | #ifdef CONFIG_PPC_P7_NAP | 1393 | #ifdef CONFIG_PPC_P7_NAP |
1394 | /* | 1394 | /* |
@@ -1408,11 +1408,11 @@ machine_check_handle_early: | |||
1408 | /* Supervisor state loss */ | 1408 | /* Supervisor state loss */ |
1409 | li r0,1 | 1409 | li r0,1 |
1410 | stb r0,PACA_NAPSTATELOST(r13) | 1410 | stb r0,PACA_NAPSTATELOST(r13) |
1411 | 3: bl .machine_check_queue_event | 1411 | 3: bl machine_check_queue_event |
1412 | MACHINE_CHECK_HANDLER_WINDUP | 1412 | MACHINE_CHECK_HANDLER_WINDUP |
1413 | GET_PACA(r13) | 1413 | GET_PACA(r13) |
1414 | ld r1,PACAR1(r13) | 1414 | ld r1,PACAR1(r13) |
1415 | b .power7_enter_nap_mode | 1415 | b power7_enter_nap_mode |
1416 | 4: | 1416 | 4: |
1417 | #endif | 1417 | #endif |
1418 | /* | 1418 | /* |
@@ -1444,7 +1444,7 @@ machine_check_handle_early: | |||
1444 | andi. r11,r12,MSR_RI | 1444 | andi. r11,r12,MSR_RI |
1445 | bne 2f | 1445 | bne 2f |
1446 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 1446 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
1447 | bl .unrecoverable_exception | 1447 | bl unrecoverable_exception |
1448 | b 1b | 1448 | b 1b |
1449 | 2: | 1449 | 2: |
1450 | /* | 1450 | /* |
@@ -1452,7 +1452,7 @@ machine_check_handle_early: | |||
1452 | * Queue up the MCE event so that we can log it later, while | 1452 | * Queue up the MCE event so that we can log it later, while |
1453 | * returning from kernel or opal call. | 1453 | * returning from kernel or opal call. |
1454 | */ | 1454 | */ |
1455 | bl .machine_check_queue_event | 1455 | bl machine_check_queue_event |
1456 | MACHINE_CHECK_HANDLER_WINDUP | 1456 | MACHINE_CHECK_HANDLER_WINDUP |
1457 | rfid | 1457 | rfid |
1458 | 9: | 1458 | 9: |
@@ -1468,7 +1468,7 @@ machine_check_handle_early: | |||
1468 | * r3 is saved in paca->slb_r3 | 1468 | * r3 is saved in paca->slb_r3 |
1469 | * We assume we aren't going to take any exceptions during this procedure. | 1469 | * We assume we aren't going to take any exceptions during this procedure. |
1470 | */ | 1470 | */ |
1471 | _GLOBAL(slb_miss_realmode) | 1471 | slb_miss_realmode: |
1472 | mflr r10 | 1472 | mflr r10 |
1473 | #ifdef CONFIG_RELOCATABLE | 1473 | #ifdef CONFIG_RELOCATABLE |
1474 | mtctr r11 | 1474 | mtctr r11 |
@@ -1477,7 +1477,7 @@ _GLOBAL(slb_miss_realmode) | |||
1477 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | 1477 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ |
1478 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | 1478 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ |
1479 | 1479 | ||
1480 | bl .slb_allocate_realmode | 1480 | bl slb_allocate_realmode |
1481 | 1481 | ||
1482 | /* All done -- return from exception. */ | 1482 | /* All done -- return from exception. */ |
1483 | 1483 | ||
@@ -1517,9 +1517,9 @@ _GLOBAL(slb_miss_realmode) | |||
1517 | unrecov_slb: | 1517 | unrecov_slb: |
1518 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | 1518 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) |
1519 | DISABLE_INTS | 1519 | DISABLE_INTS |
1520 | bl .save_nvgprs | 1520 | bl save_nvgprs |
1521 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 1521 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
1522 | bl .unrecoverable_exception | 1522 | bl unrecoverable_exception |
1523 | b 1b | 1523 | b 1b |
1524 | 1524 | ||
1525 | 1525 | ||
@@ -1536,7 +1536,7 @@ power4_fixup_nap: | |||
1536 | * Hash table stuff | 1536 | * Hash table stuff |
1537 | */ | 1537 | */ |
1538 | .align 7 | 1538 | .align 7 |
1539 | _STATIC(do_hash_page) | 1539 | do_hash_page: |
1540 | std r3,_DAR(r1) | 1540 | std r3,_DAR(r1) |
1541 | std r4,_DSISR(r1) | 1541 | std r4,_DSISR(r1) |
1542 | 1542 | ||
@@ -1573,7 +1573,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) | |||
1573 | * | 1573 | * |
1574 | * at return r3 = 0 for success, 1 for page fault, negative for error | 1574 | * at return r3 = 0 for success, 1 for page fault, negative for error |
1575 | */ | 1575 | */ |
1576 | bl .hash_page /* build HPTE if possible */ | 1576 | bl hash_page /* build HPTE if possible */ |
1577 | cmpdi r3,0 /* see if hash_page succeeded */ | 1577 | cmpdi r3,0 /* see if hash_page succeeded */ |
1578 | 1578 | ||
1579 | /* Success */ | 1579 | /* Success */ |
@@ -1587,35 +1587,35 @@ handle_page_fault: | |||
1587 | 11: ld r4,_DAR(r1) | 1587 | 11: ld r4,_DAR(r1) |
1588 | ld r5,_DSISR(r1) | 1588 | ld r5,_DSISR(r1) |
1589 | addi r3,r1,STACK_FRAME_OVERHEAD | 1589 | addi r3,r1,STACK_FRAME_OVERHEAD |
1590 | bl .do_page_fault | 1590 | bl do_page_fault |
1591 | cmpdi r3,0 | 1591 | cmpdi r3,0 |
1592 | beq+ 12f | 1592 | beq+ 12f |
1593 | bl .save_nvgprs | 1593 | bl save_nvgprs |
1594 | mr r5,r3 | 1594 | mr r5,r3 |
1595 | addi r3,r1,STACK_FRAME_OVERHEAD | 1595 | addi r3,r1,STACK_FRAME_OVERHEAD |
1596 | lwz r4,_DAR(r1) | 1596 | lwz r4,_DAR(r1) |
1597 | bl .bad_page_fault | 1597 | bl bad_page_fault |
1598 | b .ret_from_except | 1598 | b ret_from_except |
1599 | 1599 | ||
1600 | /* We have a data breakpoint exception - handle it */ | 1600 | /* We have a data breakpoint exception - handle it */ |
1601 | handle_dabr_fault: | 1601 | handle_dabr_fault: |
1602 | bl .save_nvgprs | 1602 | bl save_nvgprs |
1603 | ld r4,_DAR(r1) | 1603 | ld r4,_DAR(r1) |
1604 | ld r5,_DSISR(r1) | 1604 | ld r5,_DSISR(r1) |
1605 | addi r3,r1,STACK_FRAME_OVERHEAD | 1605 | addi r3,r1,STACK_FRAME_OVERHEAD |
1606 | bl .do_break | 1606 | bl do_break |
1607 | 12: b .ret_from_except_lite | 1607 | 12: b ret_from_except_lite |
1608 | 1608 | ||
1609 | 1609 | ||
1610 | /* We have a page fault that hash_page could handle but HV refused | 1610 | /* We have a page fault that hash_page could handle but HV refused |
1611 | * the PTE insertion | 1611 | * the PTE insertion |
1612 | */ | 1612 | */ |
1613 | 13: bl .save_nvgprs | 1613 | 13: bl save_nvgprs |
1614 | mr r5,r3 | 1614 | mr r5,r3 |
1615 | addi r3,r1,STACK_FRAME_OVERHEAD | 1615 | addi r3,r1,STACK_FRAME_OVERHEAD |
1616 | ld r4,_DAR(r1) | 1616 | ld r4,_DAR(r1) |
1617 | bl .low_hash_fault | 1617 | bl low_hash_fault |
1618 | b .ret_from_except | 1618 | b ret_from_except |
1619 | 1619 | ||
1620 | /* | 1620 | /* |
1621 | * We come here as a result of a DSI at a point where we don't want | 1621 | * We come here as a result of a DSI at a point where we don't want |
@@ -1624,16 +1624,16 @@ handle_dabr_fault: | |||
1624 | * were soft-disabled. We want to invoke the exception handler for | 1624 | * were soft-disabled. We want to invoke the exception handler for |
1625 | * the access, or panic if there isn't a handler. | 1625 | * the access, or panic if there isn't a handler. |
1626 | */ | 1626 | */ |
1627 | 77: bl .save_nvgprs | 1627 | 77: bl save_nvgprs |
1628 | mr r4,r3 | 1628 | mr r4,r3 |
1629 | addi r3,r1,STACK_FRAME_OVERHEAD | 1629 | addi r3,r1,STACK_FRAME_OVERHEAD |
1630 | li r5,SIGSEGV | 1630 | li r5,SIGSEGV |
1631 | bl .bad_page_fault | 1631 | bl bad_page_fault |
1632 | b .ret_from_except | 1632 | b ret_from_except |
1633 | 1633 | ||
1634 | /* here we have a segment miss */ | 1634 | /* here we have a segment miss */ |
1635 | do_ste_alloc: | 1635 | do_ste_alloc: |
1636 | bl .ste_allocate /* try to insert stab entry */ | 1636 | bl ste_allocate /* try to insert stab entry */ |
1637 | cmpdi r3,0 | 1637 | cmpdi r3,0 |
1638 | bne- handle_page_fault | 1638 | bne- handle_page_fault |
1639 | b fast_exception_return | 1639 | b fast_exception_return |
@@ -1646,7 +1646,7 @@ do_ste_alloc: | |||
1646 | * We assume (DAR >> 60) == 0xc. | 1646 | * We assume (DAR >> 60) == 0xc. |
1647 | */ | 1647 | */ |
1648 | .align 7 | 1648 | .align 7 |
1649 | _GLOBAL(do_stab_bolted) | 1649 | do_stab_bolted: |
1650 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | 1650 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ |
1651 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | 1651 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ |
1652 | mfspr r11,SPRN_DAR /* ea */ | 1652 | mfspr r11,SPRN_DAR /* ea */ |
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 7213d930918d..742694c1d852 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c | |||
@@ -69,7 +69,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node, | |||
69 | */ | 69 | */ |
70 | token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL); | 70 | token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL); |
71 | if (!token) | 71 | if (!token) |
72 | return 0; | 72 | return 1; |
73 | 73 | ||
74 | fw_dump.fadump_supported = 1; | 74 | fw_dump.fadump_supported = 1; |
75 | fw_dump.ibm_configure_kernel_dump = *token; | 75 | fw_dump.ibm_configure_kernel_dump = *token; |
@@ -92,7 +92,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node, | |||
92 | &size); | 92 | &size); |
93 | 93 | ||
94 | if (!sections) | 94 | if (!sections) |
95 | return 0; | 95 | return 1; |
96 | 96 | ||
97 | num_sections = size / (3 * sizeof(u32)); | 97 | num_sections = size / (3 * sizeof(u32)); |
98 | 98 | ||
@@ -110,6 +110,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node, | |||
110 | break; | 110 | break; |
111 | } | 111 | } |
112 | } | 112 | } |
113 | |||
113 | return 1; | 114 | return 1; |
114 | } | 115 | } |
115 | 116 | ||
@@ -645,7 +646,7 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm) | |||
645 | } | 646 | } |
646 | /* Lower 4 bytes of reg_value contains logical cpu id */ | 647 | /* Lower 4 bytes of reg_value contains logical cpu id */ |
647 | cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK; | 648 | cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK; |
648 | if (!cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) { | 649 | if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) { |
649 | SKIP_TO_NEXT_CPU(reg_entry); | 650 | SKIP_TO_NEXT_CPU(reg_entry); |
650 | continue; | 651 | continue; |
651 | } | 652 | } |
@@ -662,9 +663,11 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm) | |||
662 | } | 663 | } |
663 | fadump_final_note(note_buf); | 664 | fadump_final_note(note_buf); |
664 | 665 | ||
665 | pr_debug("Updating elfcore header (%llx) with cpu notes\n", | 666 | if (fdh) { |
667 | pr_debug("Updating elfcore header (%llx) with cpu notes\n", | ||
666 | fdh->elfcorehdr_addr); | 668 | fdh->elfcorehdr_addr); |
667 | fadump_update_elfcore_header((char *)__va(fdh->elfcorehdr_addr)); | 669 | fadump_update_elfcore_header((char *)__va(fdh->elfcorehdr_addr)); |
670 | } | ||
668 | return 0; | 671 | return 0; |
669 | 672 | ||
670 | error_out: | 673 | error_out: |
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 6a014c763cc7..f202d0731b06 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -105,11 +105,9 @@ __ftrace_make_nop(struct module *mod, | |||
105 | struct dyn_ftrace *rec, unsigned long addr) | 105 | struct dyn_ftrace *rec, unsigned long addr) |
106 | { | 106 | { |
107 | unsigned int op; | 107 | unsigned int op; |
108 | unsigned int jmp[5]; | ||
109 | unsigned long ptr; | 108 | unsigned long ptr; |
110 | unsigned long ip = rec->ip; | 109 | unsigned long ip = rec->ip; |
111 | unsigned long tramp; | 110 | void *tramp; |
112 | int offset; | ||
113 | 111 | ||
114 | /* read where this goes */ | 112 | /* read where this goes */ |
115 | if (probe_kernel_read(&op, (void *)ip, sizeof(int))) | 113 | if (probe_kernel_read(&op, (void *)ip, sizeof(int))) |
@@ -122,96 +120,41 @@ __ftrace_make_nop(struct module *mod, | |||
122 | } | 120 | } |
123 | 121 | ||
124 | /* lets find where the pointer goes */ | 122 | /* lets find where the pointer goes */ |
125 | tramp = find_bl_target(ip, op); | 123 | tramp = (void *)find_bl_target(ip, op); |
126 | |||
127 | /* | ||
128 | * On PPC64 the trampoline looks like: | ||
129 | * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high> | ||
130 | * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low> | ||
131 | * Where the bytes 2,3,6 and 7 make up the 32bit offset | ||
132 | * to the TOC that holds the pointer. | ||
133 | * to jump to. | ||
134 | * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1) | ||
135 | * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12) | ||
136 | * The actually address is 32 bytes from the offset | ||
137 | * into the TOC. | ||
138 | * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) | ||
139 | */ | ||
140 | |||
141 | pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); | ||
142 | |||
143 | /* Find where the trampoline jumps to */ | ||
144 | if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { | ||
145 | printk(KERN_ERR "Failed to read %lx\n", tramp); | ||
146 | return -EFAULT; | ||
147 | } | ||
148 | 124 | ||
149 | pr_devel(" %08x %08x", jmp[0], jmp[1]); | 125 | pr_devel("ip:%lx jumps to %p", ip, tramp); |
150 | 126 | ||
151 | /* verify that this is what we expect it to be */ | 127 | if (!is_module_trampoline(tramp)) { |
152 | if (((jmp[0] & 0xffff0000) != 0x3d820000) || | ||
153 | ((jmp[1] & 0xffff0000) != 0x398c0000) || | ||
154 | (jmp[2] != 0xf8410028) || | ||
155 | (jmp[3] != 0xe96c0020) || | ||
156 | (jmp[4] != 0xe84c0028)) { | ||
157 | printk(KERN_ERR "Not a trampoline\n"); | 128 | printk(KERN_ERR "Not a trampoline\n"); |
158 | return -EINVAL; | 129 | return -EINVAL; |
159 | } | 130 | } |
160 | 131 | ||
161 | /* The bottom half is signed extended */ | 132 | if (module_trampoline_target(mod, tramp, &ptr)) { |
162 | offset = ((unsigned)((unsigned short)jmp[0]) << 16) + | 133 | printk(KERN_ERR "Failed to get trampoline target\n"); |
163 | (int)((short)jmp[1]); | ||
164 | |||
165 | pr_devel(" %x ", offset); | ||
166 | |||
167 | /* get the address this jumps too */ | ||
168 | tramp = mod->arch.toc + offset + 32; | ||
169 | pr_devel("toc: %lx", tramp); | ||
170 | |||
171 | if (probe_kernel_read(jmp, (void *)tramp, 8)) { | ||
172 | printk(KERN_ERR "Failed to read %lx\n", tramp); | ||
173 | return -EFAULT; | 134 | return -EFAULT; |
174 | } | 135 | } |
175 | 136 | ||
176 | pr_devel(" %08x %08x\n", jmp[0], jmp[1]); | 137 | pr_devel("trampoline target %lx", ptr); |
177 | |||
178 | #ifdef __LITTLE_ENDIAN__ | ||
179 | ptr = ((unsigned long)jmp[1] << 32) + jmp[0]; | ||
180 | #else | ||
181 | ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; | ||
182 | #endif | ||
183 | 138 | ||
184 | /* This should match what was called */ | 139 | /* This should match what was called */ |
185 | if (ptr != ppc_function_entry((void *)addr)) { | 140 | if (ptr != ppc_function_entry((void *)addr)) { |
186 | printk(KERN_ERR "addr does not match %lx\n", ptr); | 141 | printk(KERN_ERR "addr %lx does not match expected %lx\n", |
142 | ptr, ppc_function_entry((void *)addr)); | ||
187 | return -EINVAL; | 143 | return -EINVAL; |
188 | } | 144 | } |
189 | 145 | ||
190 | /* | 146 | /* |
191 | * We want to nop the line, but the next line is | 147 | * Our original call site looks like: |
192 | * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1) | 148 | * |
193 | * This needs to be turned to a nop too. | 149 | * bl <tramp> |
194 | */ | 150 | * ld r2,XX(r1) |
195 | if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) | 151 | * |
196 | return -EFAULT; | 152 | * Milton Miller pointed out that we can not simply nop the branch. |
197 | 153 | * If a task was preempted when calling a trace function, the nops | |
198 | if (op != 0xe8410028) { | 154 | * will remove the way to restore the TOC in r2 and the r2 TOC will |
199 | printk(KERN_ERR "Next line is not ld! (%08x)\n", op); | 155 | * get corrupted. |
200 | return -EINVAL; | 156 | * |
201 | } | 157 | * Use a b +8 to jump over the load. |
202 | |||
203 | /* | ||
204 | * Milton Miller pointed out that we can not blindly do nops. | ||
205 | * If a task was preempted when calling a trace function, | ||
206 | * the nops will remove the way to restore the TOC in r2 | ||
207 | * and the r2 TOC will get corrupted. | ||
208 | */ | ||
209 | |||
210 | /* | ||
211 | * Replace: | ||
212 | * bl <tramp> <==== will be replaced with "b 1f" | ||
213 | * ld r2,40(r1) | ||
214 | * 1: | ||
215 | */ | 158 | */ |
216 | op = 0x48000008; /* b +8 */ | 159 | op = 0x48000008; /* b +8 */ |
217 | 160 | ||
@@ -349,19 +292,24 @@ static int | |||
349 | __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | 292 | __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
350 | { | 293 | { |
351 | unsigned int op[2]; | 294 | unsigned int op[2]; |
352 | unsigned long ip = rec->ip; | 295 | void *ip = (void *)rec->ip; |
353 | 296 | ||
354 | /* read where this goes */ | 297 | /* read where this goes */ |
355 | if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) | 298 | if (probe_kernel_read(op, ip, sizeof(op))) |
356 | return -EFAULT; | 299 | return -EFAULT; |
357 | 300 | ||
358 | /* | 301 | /* |
359 | * It should be pointing to two nops or | 302 | * We expect to see: |
360 | * b +8; ld r2,40(r1) | 303 | * |
304 | * b +8 | ||
305 | * ld r2,XX(r1) | ||
306 | * | ||
307 | * The load offset is different depending on the ABI. For simplicity | ||
308 | * just mask it out when doing the compare. | ||
361 | */ | 309 | */ |
362 | if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && | 310 | if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) { |
363 | ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) { | 311 | printk(KERN_ERR "Unexpected call sequence: %x %x\n", |
364 | printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); | 312 | op[0], op[1]); |
365 | return -EINVAL; | 313 | return -EINVAL; |
366 | } | 314 | } |
367 | 315 | ||
@@ -371,23 +319,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
371 | return -EINVAL; | 319 | return -EINVAL; |
372 | } | 320 | } |
373 | 321 | ||
374 | /* create the branch to the trampoline */ | 322 | /* Ensure branch is within 24 bits */ |
375 | op[0] = create_branch((unsigned int *)ip, | 323 | if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { |
376 | rec->arch.mod->arch.tramp, BRANCH_SET_LINK); | 324 | printk(KERN_ERR "Branch out of range"); |
377 | if (!op[0]) { | ||
378 | printk(KERN_ERR "REL24 out of range!\n"); | ||
379 | return -EINVAL; | 325 | return -EINVAL; |
380 | } | 326 | } |
381 | 327 | ||
382 | /* ld r2,40(r1) */ | 328 | if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { |
383 | op[1] = 0xe8410028; | 329 | printk(KERN_ERR "REL24 out of range!\n"); |
384 | 330 | return -EINVAL; | |
385 | pr_devel("write to %lx\n", rec->ip); | 331 | } |
386 | |||
387 | if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) | ||
388 | return -EPERM; | ||
389 | |||
390 | flush_icache_range(ip, ip + 8); | ||
391 | 332 | ||
392 | return 0; | 333 | return 0; |
393 | } | 334 | } |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index b7363bd42452..a95145d7f61b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -70,16 +70,15 @@ _GLOBAL(__start) | |||
70 | /* NOP this out unconditionally */ | 70 | /* NOP this out unconditionally */ |
71 | BEGIN_FTR_SECTION | 71 | BEGIN_FTR_SECTION |
72 | FIXUP_ENDIAN | 72 | FIXUP_ENDIAN |
73 | b .__start_initialization_multiplatform | 73 | b __start_initialization_multiplatform |
74 | END_FTR_SECTION(0, 1) | 74 | END_FTR_SECTION(0, 1) |
75 | 75 | ||
76 | /* Catch branch to 0 in real mode */ | 76 | /* Catch branch to 0 in real mode */ |
77 | trap | 77 | trap |
78 | 78 | ||
79 | /* Secondary processors spin on this value until it becomes nonzero. | 79 | /* Secondary processors spin on this value until it becomes non-zero. |
80 | * When it does it contains the real address of the descriptor | 80 | * When non-zero, it contains the real address of the function the cpu |
81 | * of the function that the cpu should jump to to continue | 81 | * should jump to. |
82 | * initialization. | ||
83 | */ | 82 | */ |
84 | .balign 8 | 83 | .balign 8 |
85 | .globl __secondary_hold_spinloop | 84 | .globl __secondary_hold_spinloop |
@@ -140,16 +139,15 @@ __secondary_hold: | |||
140 | tovirt(r26,r26) | 139 | tovirt(r26,r26) |
141 | #endif | 140 | #endif |
142 | /* All secondary cpus wait here until told to start. */ | 141 | /* All secondary cpus wait here until told to start. */ |
143 | 100: ld r4,__secondary_hold_spinloop-_stext(r26) | 142 | 100: ld r12,__secondary_hold_spinloop-_stext(r26) |
144 | cmpdi 0,r4,0 | 143 | cmpdi 0,r12,0 |
145 | beq 100b | 144 | beq 100b |
146 | 145 | ||
147 | #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) | 146 | #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) |
148 | #ifdef CONFIG_PPC_BOOK3E | 147 | #ifdef CONFIG_PPC_BOOK3E |
149 | tovirt(r4,r4) | 148 | tovirt(r12,r12) |
150 | #endif | 149 | #endif |
151 | ld r4,0(r4) /* deref function descriptor */ | 150 | mtctr r12 |
152 | mtctr r4 | ||
153 | mr r3,r24 | 151 | mr r3,r24 |
154 | /* | 152 | /* |
155 | * it may be the case that other platforms have r4 right to | 153 | * it may be the case that other platforms have r4 right to |
@@ -186,16 +184,16 @@ _GLOBAL(generic_secondary_thread_init) | |||
186 | mr r24,r3 | 184 | mr r24,r3 |
187 | 185 | ||
188 | /* turn on 64-bit mode */ | 186 | /* turn on 64-bit mode */ |
189 | bl .enable_64b_mode | 187 | bl enable_64b_mode |
190 | 188 | ||
191 | /* get a valid TOC pointer, wherever we're mapped at */ | 189 | /* get a valid TOC pointer, wherever we're mapped at */ |
192 | bl .relative_toc | 190 | bl relative_toc |
193 | tovirt(r2,r2) | 191 | tovirt(r2,r2) |
194 | 192 | ||
195 | #ifdef CONFIG_PPC_BOOK3E | 193 | #ifdef CONFIG_PPC_BOOK3E |
196 | /* Book3E initialization */ | 194 | /* Book3E initialization */ |
197 | mr r3,r24 | 195 | mr r3,r24 |
198 | bl .book3e_secondary_thread_init | 196 | bl book3e_secondary_thread_init |
199 | #endif | 197 | #endif |
200 | b generic_secondary_common_init | 198 | b generic_secondary_common_init |
201 | 199 | ||
@@ -214,17 +212,17 @@ _GLOBAL(generic_secondary_smp_init) | |||
214 | mr r25,r4 | 212 | mr r25,r4 |
215 | 213 | ||
216 | /* turn on 64-bit mode */ | 214 | /* turn on 64-bit mode */ |
217 | bl .enable_64b_mode | 215 | bl enable_64b_mode |
218 | 216 | ||
219 | /* get a valid TOC pointer, wherever we're mapped at */ | 217 | /* get a valid TOC pointer, wherever we're mapped at */ |
220 | bl .relative_toc | 218 | bl relative_toc |
221 | tovirt(r2,r2) | 219 | tovirt(r2,r2) |
222 | 220 | ||
223 | #ifdef CONFIG_PPC_BOOK3E | 221 | #ifdef CONFIG_PPC_BOOK3E |
224 | /* Book3E initialization */ | 222 | /* Book3E initialization */ |
225 | mr r3,r24 | 223 | mr r3,r24 |
226 | mr r4,r25 | 224 | mr r4,r25 |
227 | bl .book3e_secondary_core_init | 225 | bl book3e_secondary_core_init |
228 | #endif | 226 | #endif |
229 | 227 | ||
230 | generic_secondary_common_init: | 228 | generic_secondary_common_init: |
@@ -236,7 +234,7 @@ generic_secondary_common_init: | |||
236 | ld r13,0(r13) /* Get base vaddr of paca array */ | 234 | ld r13,0(r13) /* Get base vaddr of paca array */ |
237 | #ifndef CONFIG_SMP | 235 | #ifndef CONFIG_SMP |
238 | addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ | 236 | addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ |
239 | b .kexec_wait /* wait for next kernel if !SMP */ | 237 | b kexec_wait /* wait for next kernel if !SMP */ |
240 | #else | 238 | #else |
241 | LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ | 239 | LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ |
242 | lwz r7,0(r7) /* also the max paca allocated */ | 240 | lwz r7,0(r7) /* also the max paca allocated */ |
@@ -250,7 +248,7 @@ generic_secondary_common_init: | |||
250 | blt 1b | 248 | blt 1b |
251 | 249 | ||
252 | mr r3,r24 /* not found, copy phys to r3 */ | 250 | mr r3,r24 /* not found, copy phys to r3 */ |
253 | b .kexec_wait /* next kernel might do better */ | 251 | b kexec_wait /* next kernel might do better */ |
254 | 252 | ||
255 | 2: SET_PACA(r13) | 253 | 2: SET_PACA(r13) |
256 | #ifdef CONFIG_PPC_BOOK3E | 254 | #ifdef CONFIG_PPC_BOOK3E |
@@ -264,11 +262,13 @@ generic_secondary_common_init: | |||
264 | /* See if we need to call a cpu state restore handler */ | 262 | /* See if we need to call a cpu state restore handler */ |
265 | LOAD_REG_ADDR(r23, cur_cpu_spec) | 263 | LOAD_REG_ADDR(r23, cur_cpu_spec) |
266 | ld r23,0(r23) | 264 | ld r23,0(r23) |
267 | ld r23,CPU_SPEC_RESTORE(r23) | 265 | ld r12,CPU_SPEC_RESTORE(r23) |
268 | cmpdi 0,r23,0 | 266 | cmpdi 0,r12,0 |
269 | beq 3f | 267 | beq 3f |
270 | ld r23,0(r23) | 268 | #if !defined(_CALL_ELF) || _CALL_ELF != 2 |
271 | mtctr r23 | 269 | ld r12,0(r12) |
270 | #endif | ||
271 | mtctr r12 | ||
272 | bctrl | 272 | bctrl |
273 | 273 | ||
274 | 3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ | 274 | 3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ |
@@ -299,7 +299,7 @@ generic_secondary_common_init: | |||
299 | * Assumes we're mapped EA == RA if the MMU is on. | 299 | * Assumes we're mapped EA == RA if the MMU is on. |
300 | */ | 300 | */ |
301 | #ifdef CONFIG_PPC_BOOK3S | 301 | #ifdef CONFIG_PPC_BOOK3S |
302 | _STATIC(__mmu_off) | 302 | __mmu_off: |
303 | mfmsr r3 | 303 | mfmsr r3 |
304 | andi. r0,r3,MSR_IR|MSR_DR | 304 | andi. r0,r3,MSR_IR|MSR_DR |
305 | beqlr | 305 | beqlr |
@@ -324,12 +324,12 @@ _STATIC(__mmu_off) | |||
324 | * DT block, r4 is a physical pointer to the kernel itself | 324 | * DT block, r4 is a physical pointer to the kernel itself |
325 | * | 325 | * |
326 | */ | 326 | */ |
327 | _GLOBAL(__start_initialization_multiplatform) | 327 | __start_initialization_multiplatform: |
328 | /* Make sure we are running in 64 bits mode */ | 328 | /* Make sure we are running in 64 bits mode */ |
329 | bl .enable_64b_mode | 329 | bl enable_64b_mode |
330 | 330 | ||
331 | /* Get TOC pointer (current runtime address) */ | 331 | /* Get TOC pointer (current runtime address) */ |
332 | bl .relative_toc | 332 | bl relative_toc |
333 | 333 | ||
334 | /* find out where we are now */ | 334 | /* find out where we are now */ |
335 | bcl 20,31,$+4 | 335 | bcl 20,31,$+4 |
@@ -342,7 +342,7 @@ _GLOBAL(__start_initialization_multiplatform) | |||
342 | */ | 342 | */ |
343 | cmpldi cr0,r5,0 | 343 | cmpldi cr0,r5,0 |
344 | beq 1f | 344 | beq 1f |
345 | b .__boot_from_prom /* yes -> prom */ | 345 | b __boot_from_prom /* yes -> prom */ |
346 | 1: | 346 | 1: |
347 | /* Save parameters */ | 347 | /* Save parameters */ |
348 | mr r31,r3 | 348 | mr r31,r3 |
@@ -354,8 +354,8 @@ _GLOBAL(__start_initialization_multiplatform) | |||
354 | #endif | 354 | #endif |
355 | 355 | ||
356 | #ifdef CONFIG_PPC_BOOK3E | 356 | #ifdef CONFIG_PPC_BOOK3E |
357 | bl .start_initialization_book3e | 357 | bl start_initialization_book3e |
358 | b .__after_prom_start | 358 | b __after_prom_start |
359 | #else | 359 | #else |
360 | /* Setup some critical 970 SPRs before switching MMU off */ | 360 | /* Setup some critical 970 SPRs before switching MMU off */ |
361 | mfspr r0,SPRN_PVR | 361 | mfspr r0,SPRN_PVR |
@@ -368,15 +368,15 @@ _GLOBAL(__start_initialization_multiplatform) | |||
368 | beq 1f | 368 | beq 1f |
369 | cmpwi r0,0x45 /* 970GX */ | 369 | cmpwi r0,0x45 /* 970GX */ |
370 | bne 2f | 370 | bne 2f |
371 | 1: bl .__cpu_preinit_ppc970 | 371 | 1: bl __cpu_preinit_ppc970 |
372 | 2: | 372 | 2: |
373 | 373 | ||
374 | /* Switch off MMU if not already off */ | 374 | /* Switch off MMU if not already off */ |
375 | bl .__mmu_off | 375 | bl __mmu_off |
376 | b .__after_prom_start | 376 | b __after_prom_start |
377 | #endif /* CONFIG_PPC_BOOK3E */ | 377 | #endif /* CONFIG_PPC_BOOK3E */ |
378 | 378 | ||
379 | _INIT_STATIC(__boot_from_prom) | 379 | __boot_from_prom: |
380 | #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE | 380 | #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE |
381 | /* Save parameters */ | 381 | /* Save parameters */ |
382 | mr r31,r3 | 382 | mr r31,r3 |
@@ -395,7 +395,7 @@ _INIT_STATIC(__boot_from_prom) | |||
395 | #ifdef CONFIG_RELOCATABLE | 395 | #ifdef CONFIG_RELOCATABLE |
396 | /* Relocate code for where we are now */ | 396 | /* Relocate code for where we are now */ |
397 | mr r3,r26 | 397 | mr r3,r26 |
398 | bl .relocate | 398 | bl relocate |
399 | #endif | 399 | #endif |
400 | 400 | ||
401 | /* Restore parameters */ | 401 | /* Restore parameters */ |
@@ -407,14 +407,14 @@ _INIT_STATIC(__boot_from_prom) | |||
407 | 407 | ||
408 | /* Do all of the interaction with OF client interface */ | 408 | /* Do all of the interaction with OF client interface */ |
409 | mr r8,r26 | 409 | mr r8,r26 |
410 | bl .prom_init | 410 | bl prom_init |
411 | #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ | 411 | #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ |
412 | 412 | ||
413 | /* We never return. We also hit that trap if trying to boot | 413 | /* We never return. We also hit that trap if trying to boot |
414 | * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ | 414 | * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ |
415 | trap | 415 | trap |
416 | 416 | ||
417 | _STATIC(__after_prom_start) | 417 | __after_prom_start: |
418 | #ifdef CONFIG_RELOCATABLE | 418 | #ifdef CONFIG_RELOCATABLE |
419 | /* process relocations for the final address of the kernel */ | 419 | /* process relocations for the final address of the kernel */ |
420 | lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ | 420 | lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ |
@@ -424,7 +424,7 @@ _STATIC(__after_prom_start) | |||
424 | bne 1f | 424 | bne 1f |
425 | add r25,r25,r26 | 425 | add r25,r25,r26 |
426 | 1: mr r3,r25 | 426 | 1: mr r3,r25 |
427 | bl .relocate | 427 | bl relocate |
428 | #endif | 428 | #endif |
429 | 429 | ||
430 | /* | 430 | /* |
@@ -464,12 +464,12 @@ _STATIC(__after_prom_start) | |||
464 | lis r5,(copy_to_here - _stext)@ha | 464 | lis r5,(copy_to_here - _stext)@ha |
465 | addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ | 465 | addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ |
466 | 466 | ||
467 | bl .copy_and_flush /* copy the first n bytes */ | 467 | bl copy_and_flush /* copy the first n bytes */ |
468 | /* this includes the code being */ | 468 | /* this includes the code being */ |
469 | /* executed here. */ | 469 | /* executed here. */ |
470 | addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ | 470 | addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ |
471 | addi r8,r8,(4f - _stext)@l /* that we just made */ | 471 | addi r12,r8,(4f - _stext)@l /* that we just made */ |
472 | mtctr r8 | 472 | mtctr r12 |
473 | bctr | 473 | bctr |
474 | 474 | ||
475 | .balign 8 | 475 | .balign 8 |
@@ -478,9 +478,9 @@ p_end: .llong _end - _stext | |||
478 | 4: /* Now copy the rest of the kernel up to _end */ | 478 | 4: /* Now copy the rest of the kernel up to _end */ |
479 | addis r5,r26,(p_end - _stext)@ha | 479 | addis r5,r26,(p_end - _stext)@ha |
480 | ld r5,(p_end - _stext)@l(r5) /* get _end */ | 480 | ld r5,(p_end - _stext)@l(r5) /* get _end */ |
481 | 5: bl .copy_and_flush /* copy the rest */ | 481 | 5: bl copy_and_flush /* copy the rest */ |
482 | 482 | ||
483 | 9: b .start_here_multiplatform | 483 | 9: b start_here_multiplatform |
484 | 484 | ||
485 | /* | 485 | /* |
486 | * Copy routine used to copy the kernel to start at physical address 0 | 486 | * Copy routine used to copy the kernel to start at physical address 0 |
@@ -544,7 +544,7 @@ __secondary_start_pmac_0: | |||
544 | 544 | ||
545 | _GLOBAL(pmac_secondary_start) | 545 | _GLOBAL(pmac_secondary_start) |
546 | /* turn on 64-bit mode */ | 546 | /* turn on 64-bit mode */ |
547 | bl .enable_64b_mode | 547 | bl enable_64b_mode |
548 | 548 | ||
549 | li r0,0 | 549 | li r0,0 |
550 | mfspr r3,SPRN_HID4 | 550 | mfspr r3,SPRN_HID4 |
@@ -556,11 +556,11 @@ _GLOBAL(pmac_secondary_start) | |||
556 | slbia | 556 | slbia |
557 | 557 | ||
558 | /* get TOC pointer (real address) */ | 558 | /* get TOC pointer (real address) */ |
559 | bl .relative_toc | 559 | bl relative_toc |
560 | tovirt(r2,r2) | 560 | tovirt(r2,r2) |
561 | 561 | ||
562 | /* Copy some CPU settings from CPU 0 */ | 562 | /* Copy some CPU settings from CPU 0 */ |
563 | bl .__restore_cpu_ppc970 | 563 | bl __restore_cpu_ppc970 |
564 | 564 | ||
565 | /* pSeries do that early though I don't think we really need it */ | 565 | /* pSeries do that early though I don't think we really need it */ |
566 | mfmsr r3 | 566 | mfmsr r3 |
@@ -619,7 +619,7 @@ __secondary_start: | |||
619 | std r14,PACAKSAVE(r13) | 619 | std r14,PACAKSAVE(r13) |
620 | 620 | ||
621 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ | 621 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ |
622 | bl .early_setup_secondary | 622 | bl early_setup_secondary |
623 | 623 | ||
624 | /* | 624 | /* |
625 | * setup the new stack pointer, but *don't* use this until | 625 | * setup the new stack pointer, but *don't* use this until |
@@ -639,7 +639,7 @@ __secondary_start: | |||
639 | stb r0,PACAIRQHAPPENED(r13) | 639 | stb r0,PACAIRQHAPPENED(r13) |
640 | 640 | ||
641 | /* enable MMU and jump to start_secondary */ | 641 | /* enable MMU and jump to start_secondary */ |
642 | LOAD_REG_ADDR(r3, .start_secondary_prolog) | 642 | LOAD_REG_ADDR(r3, start_secondary_prolog) |
643 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) | 643 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) |
644 | 644 | ||
645 | mtspr SPRN_SRR0,r3 | 645 | mtspr SPRN_SRR0,r3 |
@@ -652,11 +652,11 @@ __secondary_start: | |||
652 | * zero the stack back-chain pointer and get the TOC virtual address | 652 | * zero the stack back-chain pointer and get the TOC virtual address |
653 | * before going into C code. | 653 | * before going into C code. |
654 | */ | 654 | */ |
655 | _GLOBAL(start_secondary_prolog) | 655 | start_secondary_prolog: |
656 | ld r2,PACATOC(r13) | 656 | ld r2,PACATOC(r13) |
657 | li r3,0 | 657 | li r3,0 |
658 | std r3,0(r1) /* Zero the stack frame pointer */ | 658 | std r3,0(r1) /* Zero the stack frame pointer */ |
659 | bl .start_secondary | 659 | bl start_secondary |
660 | b . | 660 | b . |
661 | /* | 661 | /* |
662 | * Reset stack pointer and call start_secondary | 662 | * Reset stack pointer and call start_secondary |
@@ -667,14 +667,14 @@ _GLOBAL(start_secondary_resume) | |||
667 | ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ | 667 | ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ |
668 | li r3,0 | 668 | li r3,0 |
669 | std r3,0(r1) /* Zero the stack frame pointer */ | 669 | std r3,0(r1) /* Zero the stack frame pointer */ |
670 | bl .start_secondary | 670 | bl start_secondary |
671 | b . | 671 | b . |
672 | #endif | 672 | #endif |
673 | 673 | ||
674 | /* | 674 | /* |
675 | * This subroutine clobbers r11 and r12 | 675 | * This subroutine clobbers r11 and r12 |
676 | */ | 676 | */ |
677 | _GLOBAL(enable_64b_mode) | 677 | enable_64b_mode: |
678 | mfmsr r11 /* grab the current MSR */ | 678 | mfmsr r11 /* grab the current MSR */ |
679 | #ifdef CONFIG_PPC_BOOK3E | 679 | #ifdef CONFIG_PPC_BOOK3E |
680 | oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ | 680 | oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ |
@@ -715,9 +715,9 @@ p_toc: .llong __toc_start + 0x8000 - 0b | |||
715 | /* | 715 | /* |
716 | * This is where the main kernel code starts. | 716 | * This is where the main kernel code starts. |
717 | */ | 717 | */ |
718 | _INIT_STATIC(start_here_multiplatform) | 718 | start_here_multiplatform: |
719 | /* set up the TOC */ | 719 | /* set up the TOC */ |
720 | bl .relative_toc | 720 | bl relative_toc |
721 | tovirt(r2,r2) | 721 | tovirt(r2,r2) |
722 | 722 | ||
723 | /* Clear out the BSS. It may have been done in prom_init, | 723 | /* Clear out the BSS. It may have been done in prom_init, |
@@ -776,9 +776,9 @@ _INIT_STATIC(start_here_multiplatform) | |||
776 | 776 | ||
777 | /* Restore parameters passed from prom_init/kexec */ | 777 | /* Restore parameters passed from prom_init/kexec */ |
778 | mr r3,r31 | 778 | mr r3,r31 |
779 | bl .early_setup /* also sets r13 and SPRG_PACA */ | 779 | bl early_setup /* also sets r13 and SPRG_PACA */ |
780 | 780 | ||
781 | LOAD_REG_ADDR(r3, .start_here_common) | 781 | LOAD_REG_ADDR(r3, start_here_common) |
782 | ld r4,PACAKMSR(r13) | 782 | ld r4,PACAKMSR(r13) |
783 | mtspr SPRN_SRR0,r3 | 783 | mtspr SPRN_SRR0,r3 |
784 | mtspr SPRN_SRR1,r4 | 784 | mtspr SPRN_SRR1,r4 |
@@ -786,7 +786,8 @@ _INIT_STATIC(start_here_multiplatform) | |||
786 | b . /* prevent speculative execution */ | 786 | b . /* prevent speculative execution */ |
787 | 787 | ||
788 | /* This is where all platforms converge execution */ | 788 | /* This is where all platforms converge execution */ |
789 | _INIT_GLOBAL(start_here_common) | 789 | |
790 | start_here_common: | ||
790 | /* relocation is on at this point */ | 791 | /* relocation is on at this point */ |
791 | std r1,PACAKSAVE(r13) | 792 | std r1,PACAKSAVE(r13) |
792 | 793 | ||
@@ -794,7 +795,7 @@ _INIT_GLOBAL(start_here_common) | |||
794 | ld r2,PACATOC(r13) | 795 | ld r2,PACATOC(r13) |
795 | 796 | ||
796 | /* Do more system initializations in virtual mode */ | 797 | /* Do more system initializations in virtual mode */ |
797 | bl .setup_system | 798 | bl setup_system |
798 | 799 | ||
799 | /* Mark interrupts soft and hard disabled (they might be enabled | 800 | /* Mark interrupts soft and hard disabled (they might be enabled |
800 | * in the PACA when doing hotplug) | 801 | * in the PACA when doing hotplug) |
@@ -805,7 +806,7 @@ _INIT_GLOBAL(start_here_common) | |||
805 | stb r0,PACAIRQHAPPENED(r13) | 806 | stb r0,PACAIRQHAPPENED(r13) |
806 | 807 | ||
807 | /* Generic kernel entry */ | 808 | /* Generic kernel entry */ |
808 | bl .start_kernel | 809 | bl start_kernel |
809 | 810 | ||
810 | /* Not reached */ | 811 | /* Not reached */ |
811 | BUG_OPCODE | 812 | BUG_OPCODE |
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index b0a1792279bb..0bb5918faaaf 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c | |||
@@ -72,7 +72,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
72 | * If so, DABR will be populated in single_step_dabr_instruction(). | 72 | * If so, DABR will be populated in single_step_dabr_instruction(). |
73 | */ | 73 | */ |
74 | if (current->thread.last_hit_ubp != bp) | 74 | if (current->thread.last_hit_ubp != bp) |
75 | set_breakpoint(info); | 75 | __set_breakpoint(info); |
76 | 76 | ||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
@@ -198,7 +198,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs) | |||
198 | 198 | ||
199 | info = counter_arch_bp(tsk->thread.last_hit_ubp); | 199 | info = counter_arch_bp(tsk->thread.last_hit_ubp); |
200 | regs->msr &= ~MSR_SE; | 200 | regs->msr &= ~MSR_SE; |
201 | set_breakpoint(info); | 201 | __set_breakpoint(info); |
202 | tsk->thread.last_hit_ubp = NULL; | 202 | tsk->thread.last_hit_ubp = NULL; |
203 | } | 203 | } |
204 | 204 | ||
@@ -284,7 +284,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
284 | if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) | 284 | if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) |
285 | perf_bp_event(bp, regs); | 285 | perf_bp_event(bp, regs); |
286 | 286 | ||
287 | set_breakpoint(info); | 287 | __set_breakpoint(info); |
288 | out: | 288 | out: |
289 | rcu_read_unlock(); | 289 | rcu_read_unlock(); |
290 | return rc; | 290 | return rc; |
@@ -316,7 +316,7 @@ int __kprobes single_step_dabr_instruction(struct die_args *args) | |||
316 | if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) | 316 | if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ)) |
317 | perf_bp_event(bp, regs); | 317 | perf_bp_event(bp, regs); |
318 | 318 | ||
319 | set_breakpoint(info); | 319 | __set_breakpoint(info); |
320 | current->thread.last_hit_ubp = NULL; | 320 | current->thread.last_hit_ubp = NULL; |
321 | 321 | ||
322 | /* | 322 | /* |
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index bfb73cc209ce..48c21acef915 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S | |||
@@ -43,7 +43,7 @@ _GLOBAL(\name) | |||
43 | */ | 43 | */ |
44 | #ifdef CONFIG_TRACE_IRQFLAGS | 44 | #ifdef CONFIG_TRACE_IRQFLAGS |
45 | stdu r1,-128(r1) | 45 | stdu r1,-128(r1) |
46 | bl .trace_hardirqs_on | 46 | bl trace_hardirqs_on |
47 | addi r1,r1,128 | 47 | addi r1,r1,128 |
48 | #endif | 48 | #endif |
49 | li r0,1 | 49 | li r0,1 |
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index e3edaa189911..f57a19348bdd 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S | |||
@@ -46,7 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) | |||
46 | mflr r0 | 46 | mflr r0 |
47 | std r0,16(r1) | 47 | std r0,16(r1) |
48 | stdu r1,-128(r1) | 48 | stdu r1,-128(r1) |
49 | bl .trace_hardirqs_on | 49 | bl trace_hardirqs_on |
50 | addi r1,r1,128 | 50 | addi r1,r1,128 |
51 | ld r0,16(r1) | 51 | ld r0,16(r1) |
52 | mtlr r0 | 52 | mtlr r0 |
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index c3ab86975614..2480256272d4 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -39,6 +39,10 @@ | |||
39 | * Pass requested state in r3: | 39 | * Pass requested state in r3: |
40 | * 0 - nap | 40 | * 0 - nap |
41 | * 1 - sleep | 41 | * 1 - sleep |
42 | * | ||
43 | * To check IRQ_HAPPENED in r4 | ||
44 | * 0 - don't check | ||
45 | * 1 - check | ||
42 | */ | 46 | */ |
43 | _GLOBAL(power7_powersave_common) | 47 | _GLOBAL(power7_powersave_common) |
44 | /* Use r3 to pass state nap/sleep/winkle */ | 48 | /* Use r3 to pass state nap/sleep/winkle */ |
@@ -58,7 +62,7 @@ _GLOBAL(power7_powersave_common) | |||
58 | /* Make sure FPU, VSX etc... are flushed as we may lose | 62 | /* Make sure FPU, VSX etc... are flushed as we may lose |
59 | * state when going to nap mode | 63 | * state when going to nap mode |
60 | */ | 64 | */ |
61 | bl .discard_lazy_cpu_state | 65 | bl discard_lazy_cpu_state |
62 | #endif /* CONFIG_SMP */ | 66 | #endif /* CONFIG_SMP */ |
63 | 67 | ||
64 | /* Hard disable interrupts */ | 68 | /* Hard disable interrupts */ |
@@ -71,6 +75,8 @@ _GLOBAL(power7_powersave_common) | |||
71 | lbz r0,PACAIRQHAPPENED(r13) | 75 | lbz r0,PACAIRQHAPPENED(r13) |
72 | cmpwi cr0,r0,0 | 76 | cmpwi cr0,r0,0 |
73 | beq 1f | 77 | beq 1f |
78 | cmpwi cr0,r4,0 | ||
79 | beq 1f | ||
74 | addi r1,r1,INT_FRAME_SIZE | 80 | addi r1,r1,INT_FRAME_SIZE |
75 | ld r0,16(r1) | 81 | ld r0,16(r1) |
76 | mtlr r0 | 82 | mtlr r0 |
@@ -114,15 +120,18 @@ _GLOBAL(power7_idle) | |||
114 | lwz r4,ADDROFF(powersave_nap)(r3) | 120 | lwz r4,ADDROFF(powersave_nap)(r3) |
115 | cmpwi 0,r4,0 | 121 | cmpwi 0,r4,0 |
116 | beqlr | 122 | beqlr |
123 | li r3, 1 | ||
117 | /* fall through */ | 124 | /* fall through */ |
118 | 125 | ||
119 | _GLOBAL(power7_nap) | 126 | _GLOBAL(power7_nap) |
127 | mr r4,r3 | ||
120 | li r3,0 | 128 | li r3,0 |
121 | b power7_powersave_common | 129 | b power7_powersave_common |
122 | /* No return */ | 130 | /* No return */ |
123 | 131 | ||
124 | _GLOBAL(power7_sleep) | 132 | _GLOBAL(power7_sleep) |
125 | li r3,1 | 133 | li r3,1 |
134 | li r4,0 | ||
126 | b power7_powersave_common | 135 | b power7_powersave_common |
127 | /* No return */ | 136 | /* No return */ |
128 | 137 | ||
@@ -168,7 +177,7 @@ _GLOBAL(power7_wakeup_loss) | |||
168 | _GLOBAL(power7_wakeup_noloss) | 177 | _GLOBAL(power7_wakeup_noloss) |
169 | lbz r0,PACA_NAPSTATELOST(r13) | 178 | lbz r0,PACA_NAPSTATELOST(r13) |
170 | cmpwi r0,0 | 179 | cmpwi r0,0 |
171 | bne .power7_wakeup_loss | 180 | bne power7_wakeup_loss |
172 | ld r1,PACAR1(r13) | 181 | ld r1,PACAR1(r13) |
173 | ld r4,_MSR(r1) | 182 | ld r4,_MSR(r1) |
174 | ld r5,_NIP(r1) | 183 | ld r5,_NIP(r1) |
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 40bd7bd4e19a..936258881c98 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c | |||
@@ -48,6 +48,9 @@ static struct of_device_id legacy_serial_parents[] __initdata = { | |||
48 | static unsigned int legacy_serial_count; | 48 | static unsigned int legacy_serial_count; |
49 | static int legacy_serial_console = -1; | 49 | static int legacy_serial_console = -1; |
50 | 50 | ||
51 | static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | | ||
52 | UPF_SHARE_IRQ | UPF_FIXED_PORT; | ||
53 | |||
51 | static unsigned int tsi_serial_in(struct uart_port *p, int offset) | 54 | static unsigned int tsi_serial_in(struct uart_port *p, int offset) |
52 | { | 55 | { |
53 | unsigned int tmp; | 56 | unsigned int tmp; |
@@ -71,8 +74,9 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
71 | phys_addr_t taddr, unsigned long irq, | 74 | phys_addr_t taddr, unsigned long irq, |
72 | upf_t flags, int irq_check_parent) | 75 | upf_t flags, int irq_check_parent) |
73 | { | 76 | { |
74 | const __be32 *clk, *spd; | 77 | const __be32 *clk, *spd, *rs; |
75 | u32 clock = BASE_BAUD * 16; | 78 | u32 clock = BASE_BAUD * 16; |
79 | u32 shift = 0; | ||
76 | int index; | 80 | int index; |
77 | 81 | ||
78 | /* get clock freq. if present */ | 82 | /* get clock freq. if present */ |
@@ -83,6 +87,11 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
83 | /* get default speed if present */ | 87 | /* get default speed if present */ |
84 | spd = of_get_property(np, "current-speed", NULL); | 88 | spd = of_get_property(np, "current-speed", NULL); |
85 | 89 | ||
90 | /* get register shift if present */ | ||
91 | rs = of_get_property(np, "reg-shift", NULL); | ||
92 | if (rs && *rs) | ||
93 | shift = be32_to_cpup(rs); | ||
94 | |||
86 | /* If we have a location index, then try to use it */ | 95 | /* If we have a location index, then try to use it */ |
87 | if (want_index >= 0 && want_index < MAX_LEGACY_SERIAL_PORTS) | 96 | if (want_index >= 0 && want_index < MAX_LEGACY_SERIAL_PORTS) |
88 | index = want_index; | 97 | index = want_index; |
@@ -126,6 +135,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
126 | legacy_serial_ports[index].uartclk = clock; | 135 | legacy_serial_ports[index].uartclk = clock; |
127 | legacy_serial_ports[index].irq = irq; | 136 | legacy_serial_ports[index].irq = irq; |
128 | legacy_serial_ports[index].flags = flags; | 137 | legacy_serial_ports[index].flags = flags; |
138 | legacy_serial_ports[index].regshift = shift; | ||
129 | legacy_serial_infos[index].taddr = taddr; | 139 | legacy_serial_infos[index].taddr = taddr; |
130 | legacy_serial_infos[index].np = of_node_get(np); | 140 | legacy_serial_infos[index].np = of_node_get(np); |
131 | legacy_serial_infos[index].clock = clock; | 141 | legacy_serial_infos[index].clock = clock; |
@@ -153,8 +163,6 @@ static int __init add_legacy_soc_port(struct device_node *np, | |||
153 | { | 163 | { |
154 | u64 addr; | 164 | u64 addr; |
155 | const __be32 *addrp; | 165 | const __be32 *addrp; |
156 | upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ | ||
157 | | UPF_FIXED_PORT; | ||
158 | struct device_node *tsi = of_get_parent(np); | 166 | struct device_node *tsi = of_get_parent(np); |
159 | 167 | ||
160 | /* We only support ports that have a clock frequency properly | 168 | /* We only support ports that have a clock frequency properly |
@@ -163,9 +171,8 @@ static int __init add_legacy_soc_port(struct device_node *np, | |||
163 | if (of_get_property(np, "clock-frequency", NULL) == NULL) | 171 | if (of_get_property(np, "clock-frequency", NULL) == NULL) |
164 | return -1; | 172 | return -1; |
165 | 173 | ||
166 | /* if reg-shift or offset, don't try to use it */ | 174 | /* if reg-offset don't try to use it */ |
167 | if ((of_get_property(np, "reg-shift", NULL) != NULL) || | 175 | if ((of_get_property(np, "reg-offset", NULL) != NULL)) |
168 | (of_get_property(np, "reg-offset", NULL) != NULL)) | ||
169 | return -1; | 176 | return -1; |
170 | 177 | ||
171 | /* if rtas uses this device, don't try to use it as well */ | 178 | /* if rtas uses this device, don't try to use it as well */ |
@@ -185,9 +192,11 @@ static int __init add_legacy_soc_port(struct device_node *np, | |||
185 | * IO port value. It will be fixed up later along with the irq | 192 | * IO port value. It will be fixed up later along with the irq |
186 | */ | 193 | */ |
187 | if (tsi && !strcmp(tsi->type, "tsi-bridge")) | 194 | if (tsi && !strcmp(tsi->type, "tsi-bridge")) |
188 | return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0); | 195 | return add_legacy_port(np, -1, UPIO_TSI, addr, addr, |
196 | NO_IRQ, legacy_port_flags, 0); | ||
189 | else | 197 | else |
190 | return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0); | 198 | return add_legacy_port(np, -1, UPIO_MEM, addr, addr, |
199 | NO_IRQ, legacy_port_flags, 0); | ||
191 | } | 200 | } |
192 | 201 | ||
193 | static int __init add_legacy_isa_port(struct device_node *np, | 202 | static int __init add_legacy_isa_port(struct device_node *np, |
@@ -233,7 +242,7 @@ static int __init add_legacy_isa_port(struct device_node *np, | |||
233 | 242 | ||
234 | /* Add port, irq will be dealt with later */ | 243 | /* Add port, irq will be dealt with later */ |
235 | return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), | 244 | return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), |
236 | taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0); | 245 | taddr, NO_IRQ, legacy_port_flags, 0); |
237 | 246 | ||
238 | } | 247 | } |
239 | 248 | ||
@@ -306,7 +315,7 @@ static int __init add_legacy_pci_port(struct device_node *np, | |||
306 | * IO port value. It will be fixed up later along with the irq | 315 | * IO port value. It will be fixed up later along with the irq |
307 | */ | 316 | */ |
308 | return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, | 317 | return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, |
309 | UPF_BOOT_AUTOCONF, np != pci_dev); | 318 | legacy_port_flags, np != pci_dev); |
310 | } | 319 | } |
311 | #endif | 320 | #endif |
312 | 321 | ||
@@ -315,17 +324,20 @@ static void __init setup_legacy_serial_console(int console) | |||
315 | struct legacy_serial_info *info = &legacy_serial_infos[console]; | 324 | struct legacy_serial_info *info = &legacy_serial_infos[console]; |
316 | struct plat_serial8250_port *port = &legacy_serial_ports[console]; | 325 | struct plat_serial8250_port *port = &legacy_serial_ports[console]; |
317 | void __iomem *addr; | 326 | void __iomem *addr; |
327 | unsigned int stride; | ||
328 | |||
329 | stride = 1 << port->regshift; | ||
318 | 330 | ||
319 | /* Check if a translated MMIO address has been found */ | 331 | /* Check if a translated MMIO address has been found */ |
320 | if (info->taddr) { | 332 | if (info->taddr) { |
321 | addr = ioremap(info->taddr, 0x1000); | 333 | addr = ioremap(info->taddr, 0x1000); |
322 | if (addr == NULL) | 334 | if (addr == NULL) |
323 | return; | 335 | return; |
324 | udbg_uart_init_mmio(addr, 1); | 336 | udbg_uart_init_mmio(addr, stride); |
325 | } else { | 337 | } else { |
326 | /* Check if it's PIO and we support untranslated PIO */ | 338 | /* Check if it's PIO and we support untranslated PIO */ |
327 | if (port->iotype == UPIO_PORT && isa_io_special) | 339 | if (port->iotype == UPIO_PORT && isa_io_special) |
328 | udbg_uart_init_pio(port->iobase, 1); | 340 | udbg_uart_init_pio(port->iobase, stride); |
329 | else | 341 | else |
330 | return; | 342 | return; |
331 | } | 343 | } |
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 3d0249599d52..4e314b90c75d 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -34,7 +34,7 @@ _GLOBAL(call_do_softirq) | |||
34 | std r0,16(r1) | 34 | std r0,16(r1) |
35 | stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) | 35 | stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) |
36 | mr r1,r3 | 36 | mr r1,r3 |
37 | bl .__do_softirq | 37 | bl __do_softirq |
38 | ld r1,0(r1) | 38 | ld r1,0(r1) |
39 | ld r0,16(r1) | 39 | ld r0,16(r1) |
40 | mtlr r0 | 40 | mtlr r0 |
@@ -45,7 +45,7 @@ _GLOBAL(call_do_irq) | |||
45 | std r0,16(r1) | 45 | std r0,16(r1) |
46 | stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) | 46 | stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) |
47 | mr r1,r4 | 47 | mr r1,r4 |
48 | bl .__do_irq | 48 | bl __do_irq |
49 | ld r1,0(r1) | 49 | ld r1,0(r1) |
50 | ld r0,16(r1) | 50 | ld r0,16(r1) |
51 | mtlr r0 | 51 | mtlr r0 |
@@ -506,7 +506,7 @@ _GLOBAL(kexec_smp_wait) | |||
506 | stb r4,PACAKEXECSTATE(r13) | 506 | stb r4,PACAKEXECSTATE(r13) |
507 | SYNC | 507 | SYNC |
508 | 508 | ||
509 | b .kexec_wait | 509 | b kexec_wait |
510 | 510 | ||
511 | /* | 511 | /* |
512 | * switch to real mode (turn mmu off) | 512 | * switch to real mode (turn mmu off) |
@@ -576,7 +576,7 @@ _GLOBAL(kexec_sequence) | |||
576 | 576 | ||
577 | /* copy dest pages, flush whole dest image */ | 577 | /* copy dest pages, flush whole dest image */ |
578 | mr r3,r29 | 578 | mr r3,r29 |
579 | bl .kexec_copy_flush /* (image) */ | 579 | bl kexec_copy_flush /* (image) */ |
580 | 580 | ||
581 | /* turn off mmu */ | 581 | /* turn off mmu */ |
582 | bl real_mode | 582 | bl real_mode |
@@ -586,7 +586,7 @@ _GLOBAL(kexec_sequence) | |||
586 | mr r4,r30 /* start, aka phys mem offset */ | 586 | mr r4,r30 /* start, aka phys mem offset */ |
587 | li r5,0x100 | 587 | li r5,0x100 |
588 | li r6,0 | 588 | li r6,0 |
589 | bl .copy_and_flush /* (dest, src, copy limit, start offset) */ | 589 | bl copy_and_flush /* (dest, src, copy limit, start offset) */ |
590 | 1: /* assume normal blr return */ | 590 | 1: /* assume normal blr return */ |
591 | 591 | ||
592 | /* release other cpus to the new kernel secondary start at 0x60 */ | 592 | /* release other cpus to the new kernel secondary start at 0x60 */ |
@@ -595,8 +595,12 @@ _GLOBAL(kexec_sequence) | |||
595 | stw r6,kexec_flag-1b(5) | 595 | stw r6,kexec_flag-1b(5) |
596 | 596 | ||
597 | /* clear out hardware hash page table and tlb */ | 597 | /* clear out hardware hash page table and tlb */ |
598 | ld r5,0(r27) /* deref function descriptor */ | 598 | #if !defined(_CALL_ELF) || _CALL_ELF != 2 |
599 | mtctr r5 | 599 | ld r12,0(r27) /* deref function descriptor */ |
600 | #else | ||
601 | mr r12,r27 | ||
602 | #endif | ||
603 | mtctr r12 | ||
600 | bctrl /* ppc_md.hpte_clear_all(void); */ | 604 | bctrl /* ppc_md.hpte_clear_all(void); */ |
601 | 605 | ||
602 | /* | 606 | /* |
@@ -630,3 +634,31 @@ _GLOBAL(kexec_sequence) | |||
630 | li r5,0 | 634 | li r5,0 |
631 | blr /* image->start(physid, image->start, 0); */ | 635 | blr /* image->start(physid, image->start, 0); */ |
632 | #endif /* CONFIG_KEXEC */ | 636 | #endif /* CONFIG_KEXEC */ |
637 | |||
638 | #ifdef CONFIG_MODULES | ||
639 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
640 | |||
641 | #ifdef CONFIG_MODVERSIONS | ||
642 | .weak __crc_TOC. | ||
643 | .section "___kcrctab+TOC.","a" | ||
644 | .globl __kcrctab_TOC. | ||
645 | __kcrctab_TOC.: | ||
646 | .llong __crc_TOC. | ||
647 | #endif | ||
648 | |||
649 | /* | ||
650 | * Export a fake .TOC. since both modpost and depmod will complain otherwise. | ||
651 | * Both modpost and depmod strip the leading . so we do the same here. | ||
652 | */ | ||
653 | .section "__ksymtab_strings","a" | ||
654 | __kstrtab_TOC.: | ||
655 | .asciz "TOC." | ||
656 | |||
657 | .section "___ksymtab+TOC.","a" | ||
658 | /* This symbol name is important: it's used by modpost to find exported syms */ | ||
659 | .globl __ksymtab_TOC. | ||
660 | __ksymtab_TOC.: | ||
661 | .llong 0 /* .value */ | ||
662 | .llong __kstrtab_TOC. | ||
663 | #endif /* ELFv2 */ | ||
664 | #endif /* MODULES */ | ||
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 12664c130d73..077d2ce6c5a7 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
23 | #include <linux/ftrace.h> | 23 | #include <linux/ftrace.h> |
24 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
25 | #include <linux/uaccess.h> | ||
25 | #include <asm/module.h> | 26 | #include <asm/module.h> |
26 | #include <asm/firmware.h> | 27 | #include <asm/firmware.h> |
27 | #include <asm/code-patching.h> | 28 | #include <asm/code-patching.h> |
@@ -41,46 +42,170 @@ | |||
41 | #define DEBUGP(fmt , ...) | 42 | #define DEBUGP(fmt , ...) |
42 | #endif | 43 | #endif |
43 | 44 | ||
45 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
46 | #define R2_STACK_OFFSET 24 | ||
47 | |||
48 | /* An address is simply the address of the function. */ | ||
49 | typedef unsigned long func_desc_t; | ||
50 | |||
51 | static func_desc_t func_desc(unsigned long addr) | ||
52 | { | ||
53 | return addr; | ||
54 | } | ||
55 | static unsigned long func_addr(unsigned long addr) | ||
56 | { | ||
57 | return addr; | ||
58 | } | ||
59 | static unsigned long stub_func_addr(func_desc_t func) | ||
60 | { | ||
61 | return func; | ||
62 | } | ||
63 | |||
64 | /* PowerPC64 specific values for the Elf64_Sym st_other field. */ | ||
65 | #define STO_PPC64_LOCAL_BIT 5 | ||
66 | #define STO_PPC64_LOCAL_MASK (7 << STO_PPC64_LOCAL_BIT) | ||
67 | #define PPC64_LOCAL_ENTRY_OFFSET(other) \ | ||
68 | (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2) | ||
69 | |||
70 | static unsigned int local_entry_offset(const Elf64_Sym *sym) | ||
71 | { | ||
72 | /* sym->st_other indicates offset to local entry point | ||
73 | * (otherwise it will assume r12 is the address of the start | ||
74 | * of function and try to derive r2 from it). */ | ||
75 | return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other); | ||
76 | } | ||
77 | #else | ||
78 | #define R2_STACK_OFFSET 40 | ||
79 | |||
80 | /* An address is address of the OPD entry, which contains address of fn. */ | ||
81 | typedef struct ppc64_opd_entry func_desc_t; | ||
82 | |||
83 | static func_desc_t func_desc(unsigned long addr) | ||
84 | { | ||
85 | return *(struct ppc64_opd_entry *)addr; | ||
86 | } | ||
87 | static unsigned long func_addr(unsigned long addr) | ||
88 | { | ||
89 | return func_desc(addr).funcaddr; | ||
90 | } | ||
91 | static unsigned long stub_func_addr(func_desc_t func) | ||
92 | { | ||
93 | return func.funcaddr; | ||
94 | } | ||
95 | static unsigned int local_entry_offset(const Elf64_Sym *sym) | ||
96 | { | ||
97 | return 0; | ||
98 | } | ||
99 | #endif | ||
100 | |||
44 | /* Like PPC32, we need little trampolines to do > 24-bit jumps (into | 101 | /* Like PPC32, we need little trampolines to do > 24-bit jumps (into |
45 | the kernel itself). But on PPC64, these need to be used for every | 102 | the kernel itself). But on PPC64, these need to be used for every |
46 | jump, actually, to reset r2 (TOC+0x8000). */ | 103 | jump, actually, to reset r2 (TOC+0x8000). */ |
47 | struct ppc64_stub_entry | 104 | struct ppc64_stub_entry |
48 | { | 105 | { |
49 | /* 28 byte jump instruction sequence (7 instructions) */ | 106 | /* 28 byte jump instruction sequence (7 instructions). We only |
50 | unsigned char jump[28]; | 107 | * need 6 instructions on ABIv2 but we always allocate 7 so |
51 | unsigned char unused[4]; | 108 | * so we don't have to modify the trampoline load instruction. */ |
109 | u32 jump[7]; | ||
110 | u32 unused; | ||
52 | /* Data for the above code */ | 111 | /* Data for the above code */ |
53 | struct ppc64_opd_entry opd; | 112 | func_desc_t funcdata; |
54 | }; | 113 | }; |
55 | 114 | ||
56 | /* We use a stub to fix up r2 (TOC ptr) and to jump to the (external) | 115 | /* |
57 | function which may be more than 24-bits away. We could simply | 116 | * PPC64 uses 24 bit jumps, but we need to jump into other modules or |
58 | patch the new r2 value and function pointer into the stub, but it's | 117 | * the kernel which may be further. So we jump to a stub. |
59 | significantly shorter to put these values at the end of the stub | 118 | * |
60 | code, and patch the stub address (32-bits relative to the TOC ptr, | 119 | * For ELFv1 we need to use this to set up the new r2 value (aka TOC |
61 | r2) into the stub. */ | 120 | * pointer). For ELFv2 it's the callee's responsibility to set up the |
62 | static struct ppc64_stub_entry ppc64_stub = | 121 | * new r2, but for both we need to save the old r2. |
63 | { .jump = { | 122 | * |
64 | #ifdef __LITTLE_ENDIAN__ | 123 | * We could simply patch the new r2 value and function pointer into |
65 | 0x00, 0x00, 0x82, 0x3d, /* addis r12,r2, <high> */ | 124 | * the stub, but it's significantly shorter to put these values at the |
66 | 0x00, 0x00, 0x8c, 0x39, /* addi r12,r12, <low> */ | 125 | * end of the stub code, and patch the stub address (32-bits relative |
67 | /* Save current r2 value in magic place on the stack. */ | 126 | * to the TOC ptr, r2) into the stub. |
68 | 0x28, 0x00, 0x41, 0xf8, /* std r2,40(r1) */ | 127 | */ |
69 | 0x20, 0x00, 0x6c, 0xe9, /* ld r11,32(r12) */ | 128 | |
70 | 0x28, 0x00, 0x4c, 0xe8, /* ld r2,40(r12) */ | 129 | static u32 ppc64_stub_insns[] = { |
71 | 0xa6, 0x03, 0x69, 0x7d, /* mtctr r11 */ | 130 | 0x3d620000, /* addis r11,r2, <high> */ |
72 | 0x20, 0x04, 0x80, 0x4e /* bctr */ | 131 | 0x396b0000, /* addi r11,r11, <low> */ |
73 | #else | ||
74 | 0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, <high> */ | ||
75 | 0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, <low> */ | ||
76 | /* Save current r2 value in magic place on the stack. */ | 132 | /* Save current r2 value in magic place on the stack. */ |
77 | 0xf8, 0x41, 0x00, 0x28, /* std r2,40(r1) */ | 133 | 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */ |
78 | 0xe9, 0x6c, 0x00, 0x20, /* ld r11,32(r12) */ | 134 | 0xe98b0020, /* ld r12,32(r11) */ |
79 | 0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */ | 135 | #if !defined(_CALL_ELF) || _CALL_ELF != 2 |
80 | 0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */ | 136 | /* Set up new r2 from function descriptor */ |
81 | 0x4e, 0x80, 0x04, 0x20 /* bctr */ | 137 | 0xe84b0028, /* ld r2,40(r11) */ |
138 | #endif | ||
139 | 0x7d8903a6, /* mtctr r12 */ | ||
140 | 0x4e800420 /* bctr */ | ||
141 | }; | ||
142 | |||
143 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
144 | |||
145 | static u32 ppc64_stub_mask[] = { | ||
146 | 0xffff0000, | ||
147 | 0xffff0000, | ||
148 | 0xffffffff, | ||
149 | 0xffffffff, | ||
150 | #if !defined(_CALL_ELF) || _CALL_ELF != 2 | ||
151 | 0xffffffff, | ||
152 | #endif | ||
153 | 0xffffffff, | ||
154 | 0xffffffff | ||
155 | }; | ||
156 | |||
157 | bool is_module_trampoline(u32 *p) | ||
158 | { | ||
159 | unsigned int i; | ||
160 | u32 insns[ARRAY_SIZE(ppc64_stub_insns)]; | ||
161 | |||
162 | BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask)); | ||
163 | |||
164 | if (probe_kernel_read(insns, p, sizeof(insns))) | ||
165 | return -EFAULT; | ||
166 | |||
167 | for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) { | ||
168 | u32 insna = insns[i]; | ||
169 | u32 insnb = ppc64_stub_insns[i]; | ||
170 | u32 mask = ppc64_stub_mask[i]; | ||
171 | |||
172 | if ((insna & mask) != (insnb & mask)) | ||
173 | return false; | ||
174 | } | ||
175 | |||
176 | return true; | ||
177 | } | ||
178 | |||
179 | int module_trampoline_target(struct module *mod, u32 *trampoline, | ||
180 | unsigned long *target) | ||
181 | { | ||
182 | u32 buf[2]; | ||
183 | u16 upper, lower; | ||
184 | long offset; | ||
185 | void *toc_entry; | ||
186 | |||
187 | if (probe_kernel_read(buf, trampoline, sizeof(buf))) | ||
188 | return -EFAULT; | ||
189 | |||
190 | upper = buf[0] & 0xffff; | ||
191 | lower = buf[1] & 0xffff; | ||
192 | |||
193 | /* perform the addis/addi, both signed */ | ||
194 | offset = ((short)upper << 16) + (short)lower; | ||
195 | |||
196 | /* | ||
197 | * Now get the address this trampoline jumps to. This | ||
198 | * is always 32 bytes into our trampoline stub. | ||
199 | */ | ||
200 | toc_entry = (void *)mod->arch.toc + offset + 32; | ||
201 | |||
202 | if (probe_kernel_read(target, toc_entry, sizeof(*target))) | ||
203 | return -EFAULT; | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
82 | #endif | 208 | #endif |
83 | } }; | ||
84 | 209 | ||
85 | /* Count how many different 24-bit relocations (different symbol, | 210 | /* Count how many different 24-bit relocations (different symbol, |
86 | different addend) */ | 211 | different addend) */ |
@@ -183,6 +308,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, | |||
183 | return relocs * sizeof(struct ppc64_stub_entry); | 308 | return relocs * sizeof(struct ppc64_stub_entry); |
184 | } | 309 | } |
185 | 310 | ||
311 | /* Still needed for ELFv2, for .TOC. */ | ||
186 | static void dedotify_versions(struct modversion_info *vers, | 312 | static void dedotify_versions(struct modversion_info *vers, |
187 | unsigned long size) | 313 | unsigned long size) |
188 | { | 314 | { |
@@ -193,7 +319,7 @@ static void dedotify_versions(struct modversion_info *vers, | |||
193 | memmove(vers->name, vers->name+1, strlen(vers->name)); | 319 | memmove(vers->name, vers->name+1, strlen(vers->name)); |
194 | } | 320 | } |
195 | 321 | ||
196 | /* Undefined symbols which refer to .funcname, hack to funcname */ | 322 | /* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ |
197 | static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) | 323 | static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) |
198 | { | 324 | { |
199 | unsigned int i; | 325 | unsigned int i; |
@@ -207,6 +333,24 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) | |||
207 | } | 333 | } |
208 | } | 334 | } |
209 | 335 | ||
336 | static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs, | ||
337 | const char *strtab, | ||
338 | unsigned int symindex) | ||
339 | { | ||
340 | unsigned int i, numsyms; | ||
341 | Elf64_Sym *syms; | ||
342 | |||
343 | syms = (Elf64_Sym *)sechdrs[symindex].sh_addr; | ||
344 | numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym); | ||
345 | |||
346 | for (i = 1; i < numsyms; i++) { | ||
347 | if (syms[i].st_shndx == SHN_UNDEF | ||
348 | && strcmp(strtab + syms[i].st_name, "TOC.") == 0) | ||
349 | return &syms[i]; | ||
350 | } | ||
351 | return NULL; | ||
352 | } | ||
353 | |||
210 | int module_frob_arch_sections(Elf64_Ehdr *hdr, | 354 | int module_frob_arch_sections(Elf64_Ehdr *hdr, |
211 | Elf64_Shdr *sechdrs, | 355 | Elf64_Shdr *sechdrs, |
212 | char *secstrings, | 356 | char *secstrings, |
@@ -271,21 +415,12 @@ static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me) | |||
271 | /* Patch stub to reference function and correct r2 value. */ | 415 | /* Patch stub to reference function and correct r2 value. */ |
272 | static inline int create_stub(Elf64_Shdr *sechdrs, | 416 | static inline int create_stub(Elf64_Shdr *sechdrs, |
273 | struct ppc64_stub_entry *entry, | 417 | struct ppc64_stub_entry *entry, |
274 | struct ppc64_opd_entry *opd, | 418 | unsigned long addr, |
275 | struct module *me) | 419 | struct module *me) |
276 | { | 420 | { |
277 | Elf64_Half *loc1, *loc2; | ||
278 | long reladdr; | 421 | long reladdr; |
279 | 422 | ||
280 | *entry = ppc64_stub; | 423 | memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns)); |
281 | |||
282 | #ifdef __LITTLE_ENDIAN__ | ||
283 | loc1 = (Elf64_Half *)&entry->jump[0]; | ||
284 | loc2 = (Elf64_Half *)&entry->jump[4]; | ||
285 | #else | ||
286 | loc1 = (Elf64_Half *)&entry->jump[2]; | ||
287 | loc2 = (Elf64_Half *)&entry->jump[6]; | ||
288 | #endif | ||
289 | 424 | ||
290 | /* Stub uses address relative to r2. */ | 425 | /* Stub uses address relative to r2. */ |
291 | reladdr = (unsigned long)entry - my_r2(sechdrs, me); | 426 | reladdr = (unsigned long)entry - my_r2(sechdrs, me); |
@@ -296,35 +431,33 @@ static inline int create_stub(Elf64_Shdr *sechdrs, | |||
296 | } | 431 | } |
297 | DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr); | 432 | DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr); |
298 | 433 | ||
299 | *loc1 = PPC_HA(reladdr); | 434 | entry->jump[0] |= PPC_HA(reladdr); |
300 | *loc2 = PPC_LO(reladdr); | 435 | entry->jump[1] |= PPC_LO(reladdr); |
301 | entry->opd.funcaddr = opd->funcaddr; | 436 | entry->funcdata = func_desc(addr); |
302 | entry->opd.r2 = opd->r2; | ||
303 | return 1; | 437 | return 1; |
304 | } | 438 | } |
305 | 439 | ||
306 | /* Create stub to jump to function described in this OPD: we need the | 440 | /* Create stub to jump to function described in this OPD/ptr: we need the |
307 | stub to set up the TOC ptr (r2) for the function. */ | 441 | stub to set up the TOC ptr (r2) for the function. */ |
308 | static unsigned long stub_for_addr(Elf64_Shdr *sechdrs, | 442 | static unsigned long stub_for_addr(Elf64_Shdr *sechdrs, |
309 | unsigned long opdaddr, | 443 | unsigned long addr, |
310 | struct module *me) | 444 | struct module *me) |
311 | { | 445 | { |
312 | struct ppc64_stub_entry *stubs; | 446 | struct ppc64_stub_entry *stubs; |
313 | struct ppc64_opd_entry *opd = (void *)opdaddr; | ||
314 | unsigned int i, num_stubs; | 447 | unsigned int i, num_stubs; |
315 | 448 | ||
316 | num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs); | 449 | num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs); |
317 | 450 | ||
318 | /* Find this stub, or if that fails, the next avail. entry */ | 451 | /* Find this stub, or if that fails, the next avail. entry */ |
319 | stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr; | 452 | stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr; |
320 | for (i = 0; stubs[i].opd.funcaddr; i++) { | 453 | for (i = 0; stub_func_addr(stubs[i].funcdata); i++) { |
321 | BUG_ON(i >= num_stubs); | 454 | BUG_ON(i >= num_stubs); |
322 | 455 | ||
323 | if (stubs[i].opd.funcaddr == opd->funcaddr) | 456 | if (stub_func_addr(stubs[i].funcdata) == func_addr(addr)) |
324 | return (unsigned long)&stubs[i]; | 457 | return (unsigned long)&stubs[i]; |
325 | } | 458 | } |
326 | 459 | ||
327 | if (!create_stub(sechdrs, &stubs[i], opd, me)) | 460 | if (!create_stub(sechdrs, &stubs[i], addr, me)) |
328 | return 0; | 461 | return 0; |
329 | 462 | ||
330 | return (unsigned long)&stubs[i]; | 463 | return (unsigned long)&stubs[i]; |
@@ -339,7 +472,8 @@ static int restore_r2(u32 *instruction, struct module *me) | |||
339 | me->name, *instruction); | 472 | me->name, *instruction); |
340 | return 0; | 473 | return 0; |
341 | } | 474 | } |
342 | *instruction = 0xe8410028; /* ld r2,40(r1) */ | 475 | /* ld r2,R2_STACK_OFFSET(r1) */ |
476 | *instruction = 0xe8410000 | R2_STACK_OFFSET; | ||
343 | return 1; | 477 | return 1; |
344 | } | 478 | } |
345 | 479 | ||
@@ -357,6 +491,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
357 | 491 | ||
358 | DEBUGP("Applying ADD relocate section %u to %u\n", relsec, | 492 | DEBUGP("Applying ADD relocate section %u to %u\n", relsec, |
359 | sechdrs[relsec].sh_info); | 493 | sechdrs[relsec].sh_info); |
494 | |||
495 | /* First time we're called, we can fix up .TOC. */ | ||
496 | if (!me->arch.toc_fixed) { | ||
497 | sym = find_dot_toc(sechdrs, strtab, symindex); | ||
498 | /* It's theoretically possible that a module doesn't want a | ||
499 | * .TOC. so don't fail it just for that. */ | ||
500 | if (sym) | ||
501 | sym->st_value = my_r2(sechdrs, me); | ||
502 | me->arch.toc_fixed = true; | ||
503 | } | ||
504 | |||
360 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { | 505 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { |
361 | /* This is where to make the change */ | 506 | /* This is where to make the change */ |
362 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | 507 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr |
@@ -453,7 +598,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
453 | return -ENOENT; | 598 | return -ENOENT; |
454 | if (!restore_r2((u32 *)location + 1, me)) | 599 | if (!restore_r2((u32 *)location + 1, me)) |
455 | return -ENOEXEC; | 600 | return -ENOEXEC; |
456 | } | 601 | } else |
602 | value += local_entry_offset(sym); | ||
457 | 603 | ||
458 | /* Convert value to relative */ | 604 | /* Convert value to relative */ |
459 | value -= (unsigned long)location; | 605 | value -= (unsigned long)location; |
@@ -474,6 +620,31 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
474 | *location = value - (unsigned long)location; | 620 | *location = value - (unsigned long)location; |
475 | break; | 621 | break; |
476 | 622 | ||
623 | case R_PPC64_TOCSAVE: | ||
624 | /* | ||
625 | * Marker reloc indicates we don't have to save r2. | ||
626 | * That would only save us one instruction, so ignore | ||
627 | * it. | ||
628 | */ | ||
629 | break; | ||
630 | |||
631 | case R_PPC64_REL16_HA: | ||
632 | /* Subtract location pointer */ | ||
633 | value -= (unsigned long)location; | ||
634 | value = ((value + 0x8000) >> 16); | ||
635 | *((uint16_t *) location) | ||
636 | = (*((uint16_t *) location) & ~0xffff) | ||
637 | | (value & 0xffff); | ||
638 | break; | ||
639 | |||
640 | case R_PPC64_REL16_LO: | ||
641 | /* Subtract location pointer */ | ||
642 | value -= (unsigned long)location; | ||
643 | *((uint16_t *) location) | ||
644 | = (*((uint16_t *) location) & ~0xffff) | ||
645 | | (value & 0xffff); | ||
646 | break; | ||
647 | |||
477 | default: | 648 | default: |
478 | printk("%s: Unknown ADD relocation: %lu\n", | 649 | printk("%s: Unknown ADD relocation: %lu\n", |
479 | me->name, | 650 | me->name, |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 24d342e91790..b49c72fd7f16 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/delay.h> | ||
24 | #include <linux/export.h> | 25 | #include <linux/export.h> |
25 | #include <linux/of_address.h> | 26 | #include <linux/of_address.h> |
26 | #include <linux/of_pci.h> | 27 | #include <linux/of_pci.h> |
@@ -120,6 +121,25 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus, | |||
120 | return 1; | 121 | return 1; |
121 | } | 122 | } |
122 | 123 | ||
124 | void pcibios_reset_secondary_bus(struct pci_dev *dev) | ||
125 | { | ||
126 | u16 ctrl; | ||
127 | |||
128 | if (ppc_md.pcibios_reset_secondary_bus) { | ||
129 | ppc_md.pcibios_reset_secondary_bus(dev); | ||
130 | return; | ||
131 | } | ||
132 | |||
133 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); | ||
134 | ctrl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
135 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); | ||
136 | msleep(2); | ||
137 | |||
138 | ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
139 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); | ||
140 | ssleep(1); | ||
141 | } | ||
142 | |||
123 | static resource_size_t pcibios_io_size(const struct pci_controller *hose) | 143 | static resource_size_t pcibios_io_size(const struct pci_controller *hose) |
124 | { | 144 | { |
125 | #ifdef CONFIG_PPC64 | 145 | #ifdef CONFIG_PPC64 |
@@ -646,60 +666,36 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, | |||
646 | void pci_process_bridge_OF_ranges(struct pci_controller *hose, | 666 | void pci_process_bridge_OF_ranges(struct pci_controller *hose, |
647 | struct device_node *dev, int primary) | 667 | struct device_node *dev, int primary) |
648 | { | 668 | { |
649 | const __be32 *ranges; | ||
650 | int rlen; | ||
651 | int pna = of_n_addr_cells(dev); | ||
652 | int np = pna + 5; | ||
653 | int memno = 0; | 669 | int memno = 0; |
654 | u32 pci_space; | ||
655 | unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; | ||
656 | struct resource *res; | 670 | struct resource *res; |
671 | struct of_pci_range range; | ||
672 | struct of_pci_range_parser parser; | ||
657 | 673 | ||
658 | printk(KERN_INFO "PCI host bridge %s %s ranges:\n", | 674 | printk(KERN_INFO "PCI host bridge %s %s ranges:\n", |
659 | dev->full_name, primary ? "(primary)" : ""); | 675 | dev->full_name, primary ? "(primary)" : ""); |
660 | 676 | ||
661 | /* Get ranges property */ | 677 | /* Check for ranges property */ |
662 | ranges = of_get_property(dev, "ranges", &rlen); | 678 | if (of_pci_range_parser_init(&parser, dev)) |
663 | if (ranges == NULL) | ||
664 | return; | 679 | return; |
665 | 680 | ||
666 | /* Parse it */ | 681 | /* Parse it */ |
667 | while ((rlen -= np * 4) >= 0) { | 682 | for_each_of_pci_range(&parser, &range) { |
668 | /* Read next ranges element */ | ||
669 | pci_space = of_read_number(ranges, 1); | ||
670 | pci_addr = of_read_number(ranges + 1, 2); | ||
671 | cpu_addr = of_translate_address(dev, ranges + 3); | ||
672 | size = of_read_number(ranges + pna + 3, 2); | ||
673 | ranges += np; | ||
674 | |||
675 | /* If we failed translation or got a zero-sized region | 683 | /* If we failed translation or got a zero-sized region |
676 | * (some FW try to feed us with non sensical zero sized regions | 684 | * (some FW try to feed us with non sensical zero sized regions |
677 | * such as power3 which look like some kind of attempt at exposing | 685 | * such as power3 which look like some kind of attempt at exposing |
678 | * the VGA memory hole) | 686 | * the VGA memory hole) |
679 | */ | 687 | */ |
680 | if (cpu_addr == OF_BAD_ADDR || size == 0) | 688 | if (range.cpu_addr == OF_BAD_ADDR || range.size == 0) |
681 | continue; | 689 | continue; |
682 | 690 | ||
683 | /* Now consume following elements while they are contiguous */ | ||
684 | for (; rlen >= np * sizeof(u32); | ||
685 | ranges += np, rlen -= np * 4) { | ||
686 | if (of_read_number(ranges, 1) != pci_space) | ||
687 | break; | ||
688 | pci_next = of_read_number(ranges + 1, 2); | ||
689 | cpu_next = of_translate_address(dev, ranges + 3); | ||
690 | if (pci_next != pci_addr + size || | ||
691 | cpu_next != cpu_addr + size) | ||
692 | break; | ||
693 | size += of_read_number(ranges + pna + 3, 2); | ||
694 | } | ||
695 | |||
696 | /* Act based on address space type */ | 691 | /* Act based on address space type */ |
697 | res = NULL; | 692 | res = NULL; |
698 | switch ((pci_space >> 24) & 0x3) { | 693 | switch (range.flags & IORESOURCE_TYPE_BITS) { |
699 | case 1: /* PCI IO space */ | 694 | case IORESOURCE_IO: |
700 | printk(KERN_INFO | 695 | printk(KERN_INFO |
701 | " IO 0x%016llx..0x%016llx -> 0x%016llx\n", | 696 | " IO 0x%016llx..0x%016llx -> 0x%016llx\n", |
702 | cpu_addr, cpu_addr + size - 1, pci_addr); | 697 | range.cpu_addr, range.cpu_addr + range.size - 1, |
698 | range.pci_addr); | ||
703 | 699 | ||
704 | /* We support only one IO range */ | 700 | /* We support only one IO range */ |
705 | if (hose->pci_io_size) { | 701 | if (hose->pci_io_size) { |
@@ -709,11 +705,12 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, | |||
709 | } | 705 | } |
710 | #ifdef CONFIG_PPC32 | 706 | #ifdef CONFIG_PPC32 |
711 | /* On 32 bits, limit I/O space to 16MB */ | 707 | /* On 32 bits, limit I/O space to 16MB */ |
712 | if (size > 0x01000000) | 708 | if (range.size > 0x01000000) |
713 | size = 0x01000000; | 709 | range.size = 0x01000000; |
714 | 710 | ||
715 | /* 32 bits needs to map IOs here */ | 711 | /* 32 bits needs to map IOs here */ |
716 | hose->io_base_virt = ioremap(cpu_addr, size); | 712 | hose->io_base_virt = ioremap(range.cpu_addr, |
713 | range.size); | ||
717 | 714 | ||
718 | /* Expect trouble if pci_addr is not 0 */ | 715 | /* Expect trouble if pci_addr is not 0 */ |
719 | if (primary) | 716 | if (primary) |
@@ -723,20 +720,20 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, | |||
723 | /* pci_io_size and io_base_phys always represent IO | 720 | /* pci_io_size and io_base_phys always represent IO |
724 | * space starting at 0 so we factor in pci_addr | 721 | * space starting at 0 so we factor in pci_addr |
725 | */ | 722 | */ |
726 | hose->pci_io_size = pci_addr + size; | 723 | hose->pci_io_size = range.pci_addr + range.size; |
727 | hose->io_base_phys = cpu_addr - pci_addr; | 724 | hose->io_base_phys = range.cpu_addr - range.pci_addr; |
728 | 725 | ||
729 | /* Build resource */ | 726 | /* Build resource */ |
730 | res = &hose->io_resource; | 727 | res = &hose->io_resource; |
731 | res->flags = IORESOURCE_IO; | 728 | range.cpu_addr = range.pci_addr; |
732 | res->start = pci_addr; | ||
733 | break; | 729 | break; |
734 | case 2: /* PCI Memory space */ | 730 | case IORESOURCE_MEM: |
735 | case 3: /* PCI 64 bits Memory space */ | ||
736 | printk(KERN_INFO | 731 | printk(KERN_INFO |
737 | " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", | 732 | " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", |
738 | cpu_addr, cpu_addr + size - 1, pci_addr, | 733 | range.cpu_addr, range.cpu_addr + range.size - 1, |
739 | (pci_space & 0x40000000) ? "Prefetch" : ""); | 734 | range.pci_addr, |
735 | (range.pci_space & 0x40000000) ? | ||
736 | "Prefetch" : ""); | ||
740 | 737 | ||
741 | /* We support only 3 memory ranges */ | 738 | /* We support only 3 memory ranges */ |
742 | if (memno >= 3) { | 739 | if (memno >= 3) { |
@@ -745,28 +742,21 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, | |||
745 | continue; | 742 | continue; |
746 | } | 743 | } |
747 | /* Handles ISA memory hole space here */ | 744 | /* Handles ISA memory hole space here */ |
748 | if (pci_addr == 0) { | 745 | if (range.pci_addr == 0) { |
749 | if (primary || isa_mem_base == 0) | 746 | if (primary || isa_mem_base == 0) |
750 | isa_mem_base = cpu_addr; | 747 | isa_mem_base = range.cpu_addr; |
751 | hose->isa_mem_phys = cpu_addr; | 748 | hose->isa_mem_phys = range.cpu_addr; |
752 | hose->isa_mem_size = size; | 749 | hose->isa_mem_size = range.size; |
753 | } | 750 | } |
754 | 751 | ||
755 | /* Build resource */ | 752 | /* Build resource */ |
756 | hose->mem_offset[memno] = cpu_addr - pci_addr; | 753 | hose->mem_offset[memno] = range.cpu_addr - |
754 | range.pci_addr; | ||
757 | res = &hose->mem_resources[memno++]; | 755 | res = &hose->mem_resources[memno++]; |
758 | res->flags = IORESOURCE_MEM; | ||
759 | if (pci_space & 0x40000000) | ||
760 | res->flags |= IORESOURCE_PREFETCH; | ||
761 | res->start = cpu_addr; | ||
762 | break; | 756 | break; |
763 | } | 757 | } |
764 | if (res != NULL) { | 758 | if (res != NULL) { |
765 | res->name = dev->full_name; | 759 | of_pci_range_to_resource(&range, dev, res); |
766 | res->end = res->start + size - 1; | ||
767 | res->parent = NULL; | ||
768 | res->sibling = NULL; | ||
769 | res->child = NULL; | ||
770 | } | 760 | } |
771 | } | 761 | } |
772 | } | 762 | } |
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 059e244484fe..44562aa97f16 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -304,6 +304,9 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, | |||
304 | struct pci_dev *dev = NULL; | 304 | struct pci_dev *dev = NULL; |
305 | const __be32 *reg; | 305 | const __be32 *reg; |
306 | int reglen, devfn; | 306 | int reglen, devfn; |
307 | #ifdef CONFIG_EEH | ||
308 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | ||
309 | #endif | ||
307 | 310 | ||
308 | pr_debug(" * %s\n", dn->full_name); | 311 | pr_debug(" * %s\n", dn->full_name); |
309 | if (!of_device_is_available(dn)) | 312 | if (!of_device_is_available(dn)) |
@@ -321,6 +324,12 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, | |||
321 | return dev; | 324 | return dev; |
322 | } | 325 | } |
323 | 326 | ||
327 | /* Device removed permanently ? */ | ||
328 | #ifdef CONFIG_EEH | ||
329 | if (edev && (edev->mode & EEH_DEV_REMOVED)) | ||
330 | return NULL; | ||
331 | #endif | ||
332 | |||
324 | /* create a new pci_dev for this device */ | 333 | /* create a new pci_dev for this device */ |
325 | dev = of_create_pci_dev(dn, bus, devfn); | 334 | dev = of_create_pci_dev(dn, bus, devfn); |
326 | if (!dev) | 335 | if (!dev) |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 450850a49dce..48d17d6fca5b 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -155,9 +155,7 @@ EXPORT_SYMBOL(__cmpdi2); | |||
155 | #endif | 155 | #endif |
156 | long long __bswapdi2(long long); | 156 | long long __bswapdi2(long long); |
157 | EXPORT_SYMBOL(__bswapdi2); | 157 | EXPORT_SYMBOL(__bswapdi2); |
158 | #ifdef __BIG_ENDIAN__ | ||
159 | EXPORT_SYMBOL(memcpy); | 158 | EXPORT_SYMBOL(memcpy); |
160 | #endif | ||
161 | EXPORT_SYMBOL(memset); | 159 | EXPORT_SYMBOL(memset); |
162 | EXPORT_SYMBOL(memmove); | 160 | EXPORT_SYMBOL(memmove); |
163 | EXPORT_SYMBOL(memcmp); | 161 | EXPORT_SYMBOL(memcmp); |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 31d021506d21..8a1edbe26b8f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #ifdef CONFIG_PPC64 | 54 | #ifdef CONFIG_PPC64 |
55 | #include <asm/firmware.h> | 55 | #include <asm/firmware.h> |
56 | #endif | 56 | #endif |
57 | #include <asm/code-patching.h> | ||
57 | #include <linux/kprobes.h> | 58 | #include <linux/kprobes.h> |
58 | #include <linux/kdebug.h> | 59 | #include <linux/kdebug.h> |
59 | 60 | ||
@@ -495,14 +496,21 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk) | |||
495 | return 0; | 496 | return 0; |
496 | } | 497 | } |
497 | 498 | ||
498 | int set_breakpoint(struct arch_hw_breakpoint *brk) | 499 | void __set_breakpoint(struct arch_hw_breakpoint *brk) |
499 | { | 500 | { |
500 | __get_cpu_var(current_brk) = *brk; | 501 | __get_cpu_var(current_brk) = *brk; |
501 | 502 | ||
502 | if (cpu_has_feature(CPU_FTR_DAWR)) | 503 | if (cpu_has_feature(CPU_FTR_DAWR)) |
503 | return set_dawr(brk); | 504 | set_dawr(brk); |
505 | else | ||
506 | set_dabr(brk); | ||
507 | } | ||
504 | 508 | ||
505 | return set_dabr(brk); | 509 | void set_breakpoint(struct arch_hw_breakpoint *brk) |
510 | { | ||
511 | preempt_disable(); | ||
512 | __set_breakpoint(brk); | ||
513 | preempt_enable(); | ||
506 | } | 514 | } |
507 | 515 | ||
508 | #ifdef CONFIG_PPC64 | 516 | #ifdef CONFIG_PPC64 |
@@ -834,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
834 | */ | 842 | */ |
835 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 843 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
836 | if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) | 844 | if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) |
837 | set_breakpoint(&new->thread.hw_brk); | 845 | __set_breakpoint(&new->thread.hw_brk); |
838 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 846 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
839 | #endif | 847 | #endif |
840 | 848 | ||
@@ -1108,7 +1116,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
1108 | struct thread_info *ti = (void *)task_stack_page(p); | 1116 | struct thread_info *ti = (void *)task_stack_page(p); |
1109 | memset(childregs, 0, sizeof(struct pt_regs)); | 1117 | memset(childregs, 0, sizeof(struct pt_regs)); |
1110 | childregs->gpr[1] = sp + sizeof(struct pt_regs); | 1118 | childregs->gpr[1] = sp + sizeof(struct pt_regs); |
1111 | childregs->gpr[14] = usp; /* function */ | 1119 | /* function */ |
1120 | if (usp) | ||
1121 | childregs->gpr[14] = ppc_function_entry((void *)usp); | ||
1112 | #ifdef CONFIG_PPC64 | 1122 | #ifdef CONFIG_PPC64 |
1113 | clear_tsk_thread_flag(p, TIF_32BIT); | 1123 | clear_tsk_thread_flag(p, TIF_32BIT); |
1114 | childregs->softe = 1; | 1124 | childregs->softe = 1; |
@@ -1187,17 +1197,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
1187 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) | 1197 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) |
1188 | p->thread.ppr = INIT_PPR; | 1198 | p->thread.ppr = INIT_PPR; |
1189 | #endif | 1199 | #endif |
1190 | /* | 1200 | kregs->nip = ppc_function_entry(f); |
1191 | * The PPC64 ABI makes use of a TOC to contain function | ||
1192 | * pointers. The function (ret_from_except) is actually a pointer | ||
1193 | * to the TOC entry. The first entry is a pointer to the actual | ||
1194 | * function. | ||
1195 | */ | ||
1196 | #ifdef CONFIG_PPC64 | ||
1197 | kregs->nip = *((unsigned long *)f); | ||
1198 | #else | ||
1199 | kregs->nip = (unsigned long)f; | ||
1200 | #endif | ||
1201 | return 0; | 1201 | return 0; |
1202 | } | 1202 | } |
1203 | 1203 | ||
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index b0c263da219a..77aa1e95e904 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh | |||
@@ -23,7 +23,7 @@ strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 | |||
23 | reloc_got2 kernstart_addr memstart_addr linux_banner _stext | 23 | reloc_got2 kernstart_addr memstart_addr linux_banner _stext |
24 | opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry | 24 | opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry |
25 | boot_command_line __prom_init_toc_start __prom_init_toc_end | 25 | boot_command_line __prom_init_toc_start __prom_init_toc_end |
26 | btext_setup_display" | 26 | btext_setup_display TOC." |
27 | 27 | ||
28 | NM="$1" | 28 | NM="$1" |
29 | OBJ="$2" | 29 | OBJ="$2" |
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 7d4c7172f38e..c168337aef9d 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c | |||
@@ -80,10 +80,6 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val) | |||
80 | if (ret) | 80 | if (ret) |
81 | return PCIBIOS_DEVICE_NOT_FOUND; | 81 | return PCIBIOS_DEVICE_NOT_FOUND; |
82 | 82 | ||
83 | if (returnval == EEH_IO_ERROR_VALUE(size) && | ||
84 | eeh_dev_check_failure(of_node_to_eeh_dev(pdn->node))) | ||
85 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
86 | |||
87 | return PCIBIOS_SUCCESSFUL; | 83 | return PCIBIOS_SUCCESSFUL; |
88 | } | 84 | } |
89 | 85 | ||
@@ -92,18 +88,39 @@ static int rtas_pci_read_config(struct pci_bus *bus, | |||
92 | int where, int size, u32 *val) | 88 | int where, int size, u32 *val) |
93 | { | 89 | { |
94 | struct device_node *busdn, *dn; | 90 | struct device_node *busdn, *dn; |
95 | 91 | struct pci_dn *pdn; | |
96 | busdn = pci_bus_to_OF_node(bus); | 92 | bool found = false; |
93 | #ifdef CONFIG_EEH | ||
94 | struct eeh_dev *edev; | ||
95 | #endif | ||
96 | int ret; | ||
97 | 97 | ||
98 | /* Search only direct children of the bus */ | 98 | /* Search only direct children of the bus */ |
99 | *val = 0xFFFFFFFF; | ||
100 | busdn = pci_bus_to_OF_node(bus); | ||
99 | for (dn = busdn->child; dn; dn = dn->sibling) { | 101 | for (dn = busdn->child; dn; dn = dn->sibling) { |
100 | struct pci_dn *pdn = PCI_DN(dn); | 102 | pdn = PCI_DN(dn); |
101 | if (pdn && pdn->devfn == devfn | 103 | if (pdn && pdn->devfn == devfn |
102 | && of_device_is_available(dn)) | 104 | && of_device_is_available(dn)) { |
103 | return rtas_read_config(pdn, where, size, val); | 105 | found = true; |
106 | break; | ||
107 | } | ||
104 | } | 108 | } |
105 | 109 | ||
106 | return PCIBIOS_DEVICE_NOT_FOUND; | 110 | if (!found) |
111 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
112 | #ifdef CONFIG_EEH | ||
113 | edev = of_node_to_eeh_dev(dn); | ||
114 | if (edev && edev->pe && edev->pe->state & EEH_PE_RESET) | ||
115 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
116 | #endif | ||
117 | |||
118 | ret = rtas_read_config(pdn, where, size, val); | ||
119 | if (*val == EEH_IO_ERROR_VALUE(size) && | ||
120 | eeh_dev_check_failure(of_node_to_eeh_dev(dn))) | ||
121 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
122 | |||
123 | return ret; | ||
107 | } | 124 | } |
108 | 125 | ||
109 | int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val) | 126 | int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val) |
@@ -136,17 +153,34 @@ static int rtas_pci_write_config(struct pci_bus *bus, | |||
136 | int where, int size, u32 val) | 153 | int where, int size, u32 val) |
137 | { | 154 | { |
138 | struct device_node *busdn, *dn; | 155 | struct device_node *busdn, *dn; |
139 | 156 | struct pci_dn *pdn; | |
140 | busdn = pci_bus_to_OF_node(bus); | 157 | bool found = false; |
158 | #ifdef CONFIG_EEH | ||
159 | struct eeh_dev *edev; | ||
160 | #endif | ||
161 | int ret; | ||
141 | 162 | ||
142 | /* Search only direct children of the bus */ | 163 | /* Search only direct children of the bus */ |
164 | busdn = pci_bus_to_OF_node(bus); | ||
143 | for (dn = busdn->child; dn; dn = dn->sibling) { | 165 | for (dn = busdn->child; dn; dn = dn->sibling) { |
144 | struct pci_dn *pdn = PCI_DN(dn); | 166 | pdn = PCI_DN(dn); |
145 | if (pdn && pdn->devfn == devfn | 167 | if (pdn && pdn->devfn == devfn |
146 | && of_device_is_available(dn)) | 168 | && of_device_is_available(dn)) { |
147 | return rtas_write_config(pdn, where, size, val); | 169 | found = true; |
170 | break; | ||
171 | } | ||
148 | } | 172 | } |
149 | return PCIBIOS_DEVICE_NOT_FOUND; | 173 | |
174 | if (!found) | ||
175 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
176 | #ifdef CONFIG_EEH | ||
177 | edev = of_node_to_eeh_dev(dn); | ||
178 | if (edev && edev->pe && (edev->pe->state & EEH_PE_RESET)) | ||
179 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
180 | #endif | ||
181 | ret = rtas_write_config(pdn, where, size, val); | ||
182 | |||
183 | return ret; | ||
150 | } | 184 | } |
151 | 185 | ||
152 | static struct pci_ops rtas_pci_ops = { | 186 | static struct pci_ops rtas_pci_ops = { |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 79b7612ac6fa..aa0f5edd8570 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -212,6 +212,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
212 | { | 212 | { |
213 | unsigned long cpu_id = (unsigned long)v - 1; | 213 | unsigned long cpu_id = (unsigned long)v - 1; |
214 | unsigned int pvr; | 214 | unsigned int pvr; |
215 | unsigned long proc_freq; | ||
215 | unsigned short maj; | 216 | unsigned short maj; |
216 | unsigned short min; | 217 | unsigned short min; |
217 | 218 | ||
@@ -263,12 +264,19 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
263 | #endif /* CONFIG_TAU */ | 264 | #endif /* CONFIG_TAU */ |
264 | 265 | ||
265 | /* | 266 | /* |
266 | * Assume here that all clock rates are the same in a | 267 | * Platforms that have variable clock rates, should implement |
267 | * smp system. -- Cort | 268 | * the method ppc_md.get_proc_freq() that reports the clock |
269 | * rate of a given cpu. The rest can use ppc_proc_freq to | ||
270 | * report the clock rate that is same across all cpus. | ||
268 | */ | 271 | */ |
269 | if (ppc_proc_freq) | 272 | if (ppc_md.get_proc_freq) |
273 | proc_freq = ppc_md.get_proc_freq(cpu_id); | ||
274 | else | ||
275 | proc_freq = ppc_proc_freq; | ||
276 | |||
277 | if (proc_freq) | ||
270 | seq_printf(m, "clock\t\t: %lu.%06luMHz\n", | 278 | seq_printf(m, "clock\t\t: %lu.%06luMHz\n", |
271 | ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); | 279 | proc_freq / 1000000, proc_freq % 1000000); |
272 | 280 | ||
273 | if (ppc_md.show_percpuinfo != NULL) | 281 | if (ppc_md.show_percpuinfo != NULL) |
274 | ppc_md.show_percpuinfo(m, cpu_id); | 282 | ppc_md.show_percpuinfo(m, cpu_id); |
@@ -382,9 +390,10 @@ void __init check_for_initrd(void) | |||
382 | 390 | ||
383 | #ifdef CONFIG_SMP | 391 | #ifdef CONFIG_SMP |
384 | 392 | ||
385 | int threads_per_core, threads_shift; | 393 | int threads_per_core, threads_per_subcore, threads_shift; |
386 | cpumask_t threads_core_mask; | 394 | cpumask_t threads_core_mask; |
387 | EXPORT_SYMBOL_GPL(threads_per_core); | 395 | EXPORT_SYMBOL_GPL(threads_per_core); |
396 | EXPORT_SYMBOL_GPL(threads_per_subcore); | ||
388 | EXPORT_SYMBOL_GPL(threads_shift); | 397 | EXPORT_SYMBOL_GPL(threads_shift); |
389 | EXPORT_SYMBOL_GPL(threads_core_mask); | 398 | EXPORT_SYMBOL_GPL(threads_core_mask); |
390 | 399 | ||
@@ -393,6 +402,7 @@ static void __init cpu_init_thread_core_maps(int tpc) | |||
393 | int i; | 402 | int i; |
394 | 403 | ||
395 | threads_per_core = tpc; | 404 | threads_per_core = tpc; |
405 | threads_per_subcore = tpc; | ||
396 | cpumask_clear(&threads_core_mask); | 406 | cpumask_clear(&threads_core_mask); |
397 | 407 | ||
398 | /* This implementation only supports power of 2 number of threads | 408 | /* This implementation only supports power of 2 number of threads |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index fbe24377eda3..ee082d771178 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/lockdep.h> | 36 | #include <linux/lockdep.h> |
37 | #include <linux/memblock.h> | 37 | #include <linux/memblock.h> |
38 | #include <linux/hugetlb.h> | 38 | #include <linux/hugetlb.h> |
39 | #include <linux/memory.h> | ||
39 | 40 | ||
40 | #include <asm/io.h> | 41 | #include <asm/io.h> |
41 | #include <asm/kdump.h> | 42 | #include <asm/kdump.h> |
@@ -341,7 +342,7 @@ void smp_release_cpus(void) | |||
341 | 342 | ||
342 | ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop | 343 | ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop |
343 | - PHYSICAL_START); | 344 | - PHYSICAL_START); |
344 | *ptr = __pa(generic_secondary_smp_init); | 345 | *ptr = ppc_function_entry(generic_secondary_smp_init); |
345 | 346 | ||
346 | /* And wait a bit for them to catch up */ | 347 | /* And wait a bit for them to catch up */ |
347 | for (i = 0; i < 100000; i++) { | 348 | for (i = 0; i < 100000; i++) { |
@@ -780,6 +781,15 @@ void __init setup_per_cpu_areas(void) | |||
780 | } | 781 | } |
781 | #endif | 782 | #endif |
782 | 783 | ||
784 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE | ||
785 | unsigned long memory_block_size_bytes(void) | ||
786 | { | ||
787 | if (ppc_md.memory_block_size) | ||
788 | return ppc_md.memory_block_size(); | ||
789 | |||
790 | return MIN_MEMORY_BLOCK_SIZE; | ||
791 | } | ||
792 | #endif | ||
783 | 793 | ||
784 | #if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO) | 794 | #if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO) |
785 | struct ppc_pci_io ppc_pci_io; | 795 | struct ppc_pci_io ppc_pci_io; |
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 8fc4177ed65a..1c794cef2883 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
@@ -134,7 +134,7 @@ static int do_signal(struct pt_regs *regs) | |||
134 | */ | 134 | */ |
135 | if (current->thread.hw_brk.address && | 135 | if (current->thread.hw_brk.address && |
136 | current->thread.hw_brk.type) | 136 | current->thread.hw_brk.type) |
137 | set_breakpoint(¤t->thread.hw_brk); | 137 | __set_breakpoint(¤t->thread.hw_brk); |
138 | #endif | 138 | #endif |
139 | /* Re-enable the breakpoints for the signal stack */ | 139 | /* Re-enable the breakpoints for the signal stack */ |
140 | thread_change_pc(current, regs); | 140 | thread_change_pc(current, regs); |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 10ffffef0414..7753af2d2613 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/atomic.h> | 36 | #include <linux/atomic.h> |
37 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
38 | #include <asm/hw_irq.h> | 38 | #include <asm/hw_irq.h> |
39 | #include <asm/kvm_ppc.h> | ||
39 | #include <asm/page.h> | 40 | #include <asm/page.h> |
40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
41 | #include <asm/prom.h> | 42 | #include <asm/prom.h> |
@@ -390,6 +391,7 @@ void smp_prepare_boot_cpu(void) | |||
390 | #ifdef CONFIG_PPC64 | 391 | #ifdef CONFIG_PPC64 |
391 | paca[boot_cpuid].__current = current; | 392 | paca[boot_cpuid].__current = current; |
392 | #endif | 393 | #endif |
394 | set_numa_node(numa_cpu_lookup_table[boot_cpuid]); | ||
393 | current_set[boot_cpuid] = task_thread_info(current); | 395 | current_set[boot_cpuid] = task_thread_info(current); |
394 | } | 396 | } |
395 | 397 | ||
@@ -457,38 +459,9 @@ int generic_check_cpu_restart(unsigned int cpu) | |||
457 | return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; | 459 | return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; |
458 | } | 460 | } |
459 | 461 | ||
460 | static atomic_t secondary_inhibit_count; | 462 | static bool secondaries_inhibited(void) |
461 | |||
462 | /* | ||
463 | * Don't allow secondary CPU threads to come online | ||
464 | */ | ||
465 | void inhibit_secondary_onlining(void) | ||
466 | { | 463 | { |
467 | /* | 464 | return kvm_hv_mode_active(); |
468 | * This makes secondary_inhibit_count stable during cpu | ||
469 | * online/offline operations. | ||
470 | */ | ||
471 | get_online_cpus(); | ||
472 | |||
473 | atomic_inc(&secondary_inhibit_count); | ||
474 | put_online_cpus(); | ||
475 | } | ||
476 | EXPORT_SYMBOL_GPL(inhibit_secondary_onlining); | ||
477 | |||
478 | /* | ||
479 | * Allow secondary CPU threads to come online again | ||
480 | */ | ||
481 | void uninhibit_secondary_onlining(void) | ||
482 | { | ||
483 | get_online_cpus(); | ||
484 | atomic_dec(&secondary_inhibit_count); | ||
485 | put_online_cpus(); | ||
486 | } | ||
487 | EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining); | ||
488 | |||
489 | static int secondaries_inhibited(void) | ||
490 | { | ||
491 | return atomic_read(&secondary_inhibit_count); | ||
492 | } | 465 | } |
493 | 466 | ||
494 | #else /* HOTPLUG_CPU */ | 467 | #else /* HOTPLUG_CPU */ |
@@ -517,7 +490,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
517 | * Don't allow secondary threads to come online if inhibited | 490 | * Don't allow secondary threads to come online if inhibited |
518 | */ | 491 | */ |
519 | if (threads_per_core > 1 && secondaries_inhibited() && | 492 | if (threads_per_core > 1 && secondaries_inhibited() && |
520 | cpu % threads_per_core != 0) | 493 | cpu_thread_in_subcore(cpu)) |
521 | return -EBUSY; | 494 | return -EBUSY; |
522 | 495 | ||
523 | if (smp_ops == NULL || | 496 | if (smp_ops == NULL || |
@@ -750,6 +723,12 @@ void start_secondary(void *unused) | |||
750 | } | 723 | } |
751 | traverse_core_siblings(cpu, true); | 724 | traverse_core_siblings(cpu, true); |
752 | 725 | ||
726 | /* | ||
727 | * numa_node_id() works after this. | ||
728 | */ | ||
729 | set_numa_node(numa_cpu_lookup_table[cpu]); | ||
730 | set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); | ||
731 | |||
753 | smp_wmb(); | 732 | smp_wmb(); |
754 | notify_cpu_starting(cpu); | 733 | notify_cpu_starting(cpu); |
755 | set_cpu_online(cpu, true); | 734 | set_cpu_online(cpu, true); |
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index d90d4b7810d6..67fd2fd2620a 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c | |||
@@ -404,7 +404,7 @@ void ppc_enable_pmcs(void) | |||
404 | } | 404 | } |
405 | EXPORT_SYMBOL(ppc_enable_pmcs); | 405 | EXPORT_SYMBOL(ppc_enable_pmcs); |
406 | 406 | ||
407 | #define __SYSFS_SPRSETUP(NAME, ADDRESS, EXTRA) \ | 407 | #define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \ |
408 | static void read_##NAME(void *val) \ | 408 | static void read_##NAME(void *val) \ |
409 | { \ | 409 | { \ |
410 | *(unsigned long *)val = mfspr(ADDRESS); \ | 410 | *(unsigned long *)val = mfspr(ADDRESS); \ |
@@ -413,7 +413,9 @@ static void write_##NAME(void *val) \ | |||
413 | { \ | 413 | { \ |
414 | EXTRA; \ | 414 | EXTRA; \ |
415 | mtspr(ADDRESS, *(unsigned long *)val); \ | 415 | mtspr(ADDRESS, *(unsigned long *)val); \ |
416 | } \ | 416 | } |
417 | |||
418 | #define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \ | ||
417 | static ssize_t show_##NAME(struct device *dev, \ | 419 | static ssize_t show_##NAME(struct device *dev, \ |
418 | struct device_attribute *attr, \ | 420 | struct device_attribute *attr, \ |
419 | char *buf) \ | 421 | char *buf) \ |
@@ -436,10 +438,15 @@ static ssize_t __used \ | |||
436 | return count; \ | 438 | return count; \ |
437 | } | 439 | } |
438 | 440 | ||
439 | #define SYSFS_PMCSETUP(NAME, ADDRESS) \ | 441 | #define SYSFS_PMCSETUP(NAME, ADDRESS) \ |
440 | __SYSFS_SPRSETUP(NAME, ADDRESS, ppc_enable_pmcs()) | 442 | __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \ |
441 | #define SYSFS_SPRSETUP(NAME, ADDRESS) \ | 443 | __SYSFS_SPRSETUP_SHOW_STORE(NAME) |
442 | __SYSFS_SPRSETUP(NAME, ADDRESS, ) | 444 | #define SYSFS_SPRSETUP(NAME, ADDRESS) \ |
445 | __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \ | ||
446 | __SYSFS_SPRSETUP_SHOW_STORE(NAME) | ||
447 | |||
448 | #define SYSFS_SPRSETUP_SHOW_STORE(NAME) \ | ||
449 | __SYSFS_SPRSETUP_SHOW_STORE(NAME) | ||
443 | 450 | ||
444 | /* Let's define all possible registers, we'll only hook up the ones | 451 | /* Let's define all possible registers, we'll only hook up the ones |
445 | * that are implemented on the current processor | 452 | * that are implemented on the current processor |
@@ -477,7 +484,6 @@ SYSFS_PMCSETUP(pmc8, SPRN_PMC8); | |||
477 | SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); | 484 | SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); |
478 | SYSFS_SPRSETUP(purr, SPRN_PURR); | 485 | SYSFS_SPRSETUP(purr, SPRN_PURR); |
479 | SYSFS_SPRSETUP(spurr, SPRN_SPURR); | 486 | SYSFS_SPRSETUP(spurr, SPRN_SPURR); |
480 | SYSFS_SPRSETUP(dscr, SPRN_DSCR); | ||
481 | SYSFS_SPRSETUP(pir, SPRN_PIR); | 487 | SYSFS_SPRSETUP(pir, SPRN_PIR); |
482 | 488 | ||
483 | /* | 489 | /* |
@@ -487,12 +493,27 @@ SYSFS_SPRSETUP(pir, SPRN_PIR); | |||
487 | */ | 493 | */ |
488 | static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); | 494 | static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); |
489 | static DEVICE_ATTR(spurr, 0400, show_spurr, NULL); | 495 | static DEVICE_ATTR(spurr, 0400, show_spurr, NULL); |
490 | static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr); | ||
491 | static DEVICE_ATTR(purr, 0400, show_purr, store_purr); | 496 | static DEVICE_ATTR(purr, 0400, show_purr, store_purr); |
492 | static DEVICE_ATTR(pir, 0400, show_pir, NULL); | 497 | static DEVICE_ATTR(pir, 0400, show_pir, NULL); |
493 | 498 | ||
494 | unsigned long dscr_default = 0; | 499 | static unsigned long dscr_default; |
495 | EXPORT_SYMBOL(dscr_default); | 500 | |
501 | static void read_dscr(void *val) | ||
502 | { | ||
503 | *(unsigned long *)val = get_paca()->dscr_default; | ||
504 | } | ||
505 | |||
506 | static void write_dscr(void *val) | ||
507 | { | ||
508 | get_paca()->dscr_default = *(unsigned long *)val; | ||
509 | if (!current->thread.dscr_inherit) { | ||
510 | current->thread.dscr = *(unsigned long *)val; | ||
511 | mtspr(SPRN_DSCR, *(unsigned long *)val); | ||
512 | } | ||
513 | } | ||
514 | |||
515 | SYSFS_SPRSETUP_SHOW_STORE(dscr); | ||
516 | static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr); | ||
496 | 517 | ||
497 | static void add_write_permission_dev_attr(struct device_attribute *attr) | 518 | static void add_write_permission_dev_attr(struct device_attribute *attr) |
498 | { | 519 | { |
@@ -505,14 +526,6 @@ static ssize_t show_dscr_default(struct device *dev, | |||
505 | return sprintf(buf, "%lx\n", dscr_default); | 526 | return sprintf(buf, "%lx\n", dscr_default); |
506 | } | 527 | } |
507 | 528 | ||
508 | static void update_dscr(void *dummy) | ||
509 | { | ||
510 | if (!current->thread.dscr_inherit) { | ||
511 | current->thread.dscr = dscr_default; | ||
512 | mtspr(SPRN_DSCR, dscr_default); | ||
513 | } | ||
514 | } | ||
515 | |||
516 | static ssize_t __used store_dscr_default(struct device *dev, | 529 | static ssize_t __used store_dscr_default(struct device *dev, |
517 | struct device_attribute *attr, const char *buf, | 530 | struct device_attribute *attr, const char *buf, |
518 | size_t count) | 531 | size_t count) |
@@ -525,7 +538,7 @@ static ssize_t __used store_dscr_default(struct device *dev, | |||
525 | return -EINVAL; | 538 | return -EINVAL; |
526 | dscr_default = val; | 539 | dscr_default = val; |
527 | 540 | ||
528 | on_each_cpu(update_dscr, NULL, 1); | 541 | on_each_cpu(write_dscr, &val, 1); |
529 | 542 | ||
530 | return count; | 543 | return count; |
531 | } | 544 | } |
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 93219c34af32..895c50ca943c 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S | |||
@@ -17,12 +17,12 @@ | |||
17 | #include <asm/ppc_asm.h> | 17 | #include <asm/ppc_asm.h> |
18 | 18 | ||
19 | #ifdef CONFIG_PPC64 | 19 | #ifdef CONFIG_PPC64 |
20 | #define SYSCALL(func) .llong .sys_##func,.sys_##func | 20 | #define SYSCALL(func) .llong DOTSYM(sys_##func),DOTSYM(sys_##func) |
21 | #define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func | 21 | #define COMPAT_SYS(func) .llong DOTSYM(sys_##func),DOTSYM(compat_sys_##func) |
22 | #define PPC_SYS(func) .llong .ppc_##func,.ppc_##func | 22 | #define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) |
23 | #define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall | 23 | #define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) |
24 | #define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func | 24 | #define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) |
25 | #define SYSX(f, f3264, f32) .llong .f,.f3264 | 25 | #define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) |
26 | #else | 26 | #else |
27 | #define SYSCALL(func) .long sys_##func | 27 | #define SYSCALL(func) .long sys_##func |
28 | #define COMPAT_SYS(func) .long sys_##func | 28 | #define COMPAT_SYS(func) .long sys_##func |
@@ -36,6 +36,8 @@ | |||
36 | #define PPC_SYS_SPU(func) PPC_SYS(func) | 36 | #define PPC_SYS_SPU(func) PPC_SYS(func) |
37 | #define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) | 37 | #define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) |
38 | 38 | ||
39 | .section .rodata,"a" | ||
40 | |||
39 | #ifdef CONFIG_PPC64 | 41 | #ifdef CONFIG_PPC64 |
40 | #define sys_sigpending sys_ni_syscall | 42 | #define sys_sigpending sys_ni_syscall |
41 | #define sys_old_getrlimit sys_ni_syscall | 43 | #define sys_old_getrlimit sys_ni_syscall |
@@ -43,5 +45,7 @@ | |||
43 | .p2align 3 | 45 | .p2align 3 |
44 | #endif | 46 | #endif |
45 | 47 | ||
46 | _GLOBAL(sys_call_table) | 48 | .globl sys_call_table |
49 | sys_call_table: | ||
50 | |||
47 | #include <asm/systbl.h> | 51 | #include <asm/systbl.h> |
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 03567c05950a..2a324f4cb1b9 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <asm/ppc-opcode.h> | 10 | #include <asm/ppc-opcode.h> |
11 | #include <asm/ptrace.h> | 11 | #include <asm/ptrace.h> |
12 | #include <asm/reg.h> | 12 | #include <asm/reg.h> |
13 | #include <asm/bug.h> | ||
13 | 14 | ||
14 | #ifdef CONFIG_VSX | 15 | #ifdef CONFIG_VSX |
15 | /* See fpu.S, this is borrowed from there */ | 16 | /* See fpu.S, this is borrowed from there */ |
@@ -41,7 +42,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ | |||
41 | /* Stack frame offsets for local variables. */ | 42 | /* Stack frame offsets for local variables. */ |
42 | #define TM_FRAME_L0 TM_FRAME_SIZE-16 | 43 | #define TM_FRAME_L0 TM_FRAME_SIZE-16 |
43 | #define TM_FRAME_L1 TM_FRAME_SIZE-8 | 44 | #define TM_FRAME_L1 TM_FRAME_SIZE-8 |
44 | #define STACK_PARAM(x) (48+((x)*8)) | ||
45 | 45 | ||
46 | 46 | ||
47 | /* In order to access the TM SPRs, TM must be enabled. So, do so: */ | 47 | /* In order to access the TM SPRs, TM must be enabled. So, do so: */ |
@@ -78,12 +78,6 @@ _GLOBAL(tm_abort) | |||
78 | TABORT(R3) | 78 | TABORT(R3) |
79 | blr | 79 | blr |
80 | 80 | ||
81 | .section ".toc","aw" | ||
82 | DSCR_DEFAULT: | ||
83 | .tc dscr_default[TC],dscr_default | ||
84 | |||
85 | .section ".text" | ||
86 | |||
87 | /* void tm_reclaim(struct thread_struct *thread, | 81 | /* void tm_reclaim(struct thread_struct *thread, |
88 | * unsigned long orig_msr, | 82 | * unsigned long orig_msr, |
89 | * uint8_t cause) | 83 | * uint8_t cause) |
@@ -108,12 +102,12 @@ _GLOBAL(tm_reclaim) | |||
108 | mflr r0 | 102 | mflr r0 |
109 | stw r6, 8(r1) | 103 | stw r6, 8(r1) |
110 | std r0, 16(r1) | 104 | std r0, 16(r1) |
111 | std r2, 40(r1) | 105 | std r2, STK_GOT(r1) |
112 | stdu r1, -TM_FRAME_SIZE(r1) | 106 | stdu r1, -TM_FRAME_SIZE(r1) |
113 | 107 | ||
114 | /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ | 108 | /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ |
115 | 109 | ||
116 | std r3, STACK_PARAM(0)(r1) | 110 | std r3, STK_PARAM(R3)(r1) |
117 | SAVE_NVGPRS(r1) | 111 | SAVE_NVGPRS(r1) |
118 | 112 | ||
119 | /* We need to setup MSR for VSX register save instructions. Here we | 113 | /* We need to setup MSR for VSX register save instructions. Here we |
@@ -175,6 +169,13 @@ dont_backup_vec: | |||
175 | stfd fr0,FPSTATE_FPSCR(r7) | 169 | stfd fr0,FPSTATE_FPSCR(r7) |
176 | 170 | ||
177 | dont_backup_fp: | 171 | dont_backup_fp: |
172 | /* Do sanity check on MSR to make sure we are suspended */ | ||
173 | li r7, (MSR_TS_S)@higher | ||
174 | srdi r6, r14, 32 | ||
175 | and r6, r6, r7 | ||
176 | 1: tdeqi r6, 0 | ||
177 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 | ||
178 | |||
178 | /* The moment we treclaim, ALL of our GPRs will switch | 179 | /* The moment we treclaim, ALL of our GPRs will switch |
179 | * to user register state. (FPRs, CCR etc. also!) | 180 | * to user register state. (FPRs, CCR etc. also!) |
180 | * Use an sprg and a tm_scratch in the PACA to shuffle. | 181 | * Use an sprg and a tm_scratch in the PACA to shuffle. |
@@ -202,7 +203,7 @@ dont_backup_fp: | |||
202 | /* Now get some more GPRS free */ | 203 | /* Now get some more GPRS free */ |
203 | std r7, GPR7(r1) /* Temporary stash */ | 204 | std r7, GPR7(r1) /* Temporary stash */ |
204 | std r12, GPR12(r1) /* '' '' '' */ | 205 | std r12, GPR12(r1) /* '' '' '' */ |
205 | ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ | 206 | ld r12, STK_PARAM(R3)(r1) /* Param 0, thread_struct * */ |
206 | 207 | ||
207 | std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */ | 208 | std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */ |
208 | 209 | ||
@@ -289,11 +290,10 @@ dont_backup_fp: | |||
289 | ld r0, 16(r1) | 290 | ld r0, 16(r1) |
290 | mtcr r4 | 291 | mtcr r4 |
291 | mtlr r0 | 292 | mtlr r0 |
292 | ld r2, 40(r1) | 293 | ld r2, STK_GOT(r1) |
293 | 294 | ||
294 | /* Load system default DSCR */ | 295 | /* Load CPU's default DSCR */ |
295 | ld r4, DSCR_DEFAULT@toc(r2) | 296 | ld r0, PACA_DSCR(r13) |
296 | ld r0, 0(r4) | ||
297 | mtspr SPRN_DSCR, r0 | 297 | mtspr SPRN_DSCR, r0 |
298 | 298 | ||
299 | blr | 299 | blr |
@@ -312,7 +312,7 @@ _GLOBAL(__tm_recheckpoint) | |||
312 | mflr r0 | 312 | mflr r0 |
313 | stw r5, 8(r1) | 313 | stw r5, 8(r1) |
314 | std r0, 16(r1) | 314 | std r0, 16(r1) |
315 | std r2, 40(r1) | 315 | std r2, STK_GOT(r1) |
316 | stdu r1, -TM_FRAME_SIZE(r1) | 316 | stdu r1, -TM_FRAME_SIZE(r1) |
317 | 317 | ||
318 | /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. | 318 | /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. |
@@ -320,8 +320,6 @@ _GLOBAL(__tm_recheckpoint) | |||
320 | */ | 320 | */ |
321 | SAVE_NVGPRS(r1) | 321 | SAVE_NVGPRS(r1) |
322 | 322 | ||
323 | std r1, PACAR1(r13) | ||
324 | |||
325 | /* Load complete register state from ts_ckpt* registers */ | 323 | /* Load complete register state from ts_ckpt* registers */ |
326 | 324 | ||
327 | addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */ | 325 | addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */ |
@@ -385,12 +383,10 @@ restore_gprs: | |||
385 | /* ******************** CR,LR,CCR,MSR ********** */ | 383 | /* ******************** CR,LR,CCR,MSR ********** */ |
386 | ld r4, _CTR(r7) | 384 | ld r4, _CTR(r7) |
387 | ld r5, _LINK(r7) | 385 | ld r5, _LINK(r7) |
388 | ld r6, _CCR(r7) | ||
389 | ld r8, _XER(r7) | 386 | ld r8, _XER(r7) |
390 | 387 | ||
391 | mtctr r4 | 388 | mtctr r4 |
392 | mtlr r5 | 389 | mtlr r5 |
393 | mtcr r6 | ||
394 | mtxer r8 | 390 | mtxer r8 |
395 | 391 | ||
396 | /* ******************** TAR ******************** */ | 392 | /* ******************** TAR ******************** */ |
@@ -406,7 +402,8 @@ restore_gprs: | |||
406 | li r4, 0 | 402 | li r4, 0 |
407 | mtmsrd r4, 1 | 403 | mtmsrd r4, 1 |
408 | 404 | ||
409 | REST_4GPRS(0, r7) /* GPR0-3 */ | 405 | REST_GPR(0, r7) /* GPR0 */ |
406 | REST_2GPRS(2, r7) /* GPR2-3 */ | ||
410 | REST_GPR(4, r7) /* GPR4 */ | 407 | REST_GPR(4, r7) /* GPR4 */ |
411 | REST_4GPRS(8, r7) /* GPR8-11 */ | 408 | REST_4GPRS(8, r7) /* GPR8-11 */ |
412 | REST_2GPRS(12, r7) /* GPR12-13 */ | 409 | REST_2GPRS(12, r7) /* GPR12-13 */ |
@@ -418,6 +415,31 @@ restore_gprs: | |||
418 | mtspr SPRN_DSCR, r5 | 415 | mtspr SPRN_DSCR, r5 |
419 | mtspr SPRN_PPR, r6 | 416 | mtspr SPRN_PPR, r6 |
420 | 417 | ||
418 | /* Do final sanity check on TEXASR to make sure FS is set. Do this | ||
419 | * here before we load up the userspace r1 so any bugs we hit will get | ||
420 | * a call chain */ | ||
421 | mfspr r5, SPRN_TEXASR | ||
422 | srdi r5, r5, 16 | ||
423 | li r6, (TEXASR_FS)@h | ||
424 | and r6, r6, r5 | ||
425 | 1: tdeqi r6, 0 | ||
426 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 | ||
427 | |||
428 | /* Do final sanity check on MSR to make sure we are not transactional | ||
429 | * or suspended | ||
430 | */ | ||
431 | mfmsr r6 | ||
432 | li r5, (MSR_TS_MASK)@higher | ||
433 | srdi r6, r6, 32 | ||
434 | and r6, r6, r5 | ||
435 | 1: tdnei r6, 0 | ||
436 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 | ||
437 | |||
438 | /* Restore CR */ | ||
439 | ld r6, _CCR(r7) | ||
440 | mtcr r6 | ||
441 | |||
442 | REST_GPR(1, r7) /* GPR1 */ | ||
421 | REST_GPR(5, r7) /* GPR5-7 */ | 443 | REST_GPR(5, r7) /* GPR5-7 */ |
422 | REST_GPR(6, r7) | 444 | REST_GPR(6, r7) |
423 | ld r7, GPR7(r7) | 445 | ld r7, GPR7(r7) |
@@ -448,11 +470,10 @@ restore_gprs: | |||
448 | ld r0, 16(r1) | 470 | ld r0, 16(r1) |
449 | mtcr r4 | 471 | mtcr r4 |
450 | mtlr r0 | 472 | mtlr r0 |
451 | ld r2, 40(r1) | 473 | ld r2, STK_GOT(r1) |
452 | 474 | ||
453 | /* Load system default DSCR */ | 475 | /* Load CPU's default DSCR */ |
454 | ld r4, DSCR_DEFAULT@toc(r2) | 476 | ld r0, PACA_DSCR(r13) |
455 | ld r0, 0(r4) | ||
456 | mtspr SPRN_DSCR, r0 | 477 | mtspr SPRN_DSCR, r0 |
457 | 478 | ||
458 | blr | 479 | blr |