Diffstat (limited to 'arch/powerpc')
 27 files changed, 255 insertions, 119 deletions
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig
index 63392f4b29a4..d2008887eb8c 100644
--- a/arch/powerpc/configs/pseries_le_defconfig
+++ b/arch/powerpc/configs/pseries_le_defconfig
@@ -48,7 +48,6 @@ CONFIG_KEXEC=y
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_CMA=y
 CONFIG_PPC_64K_PAGES=y
 CONFIG_PPC_SUBPAGE_PROT=y
 CONFIG_SCHED_SMT=y
@@ -138,6 +137,7 @@ CONFIG_NETCONSOLE=y
 CONFIG_NETPOLL_TRAP=y
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=m
+CONFIG_VHOST_NET=m
 CONFIG_VORTEX=y
 CONFIG_ACENIC=m
 CONFIG_ACENIC_OMIT_TIGON_I=y
@@ -303,4 +303,9 @@ CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_DEV_NX=y
 CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM_BOOK3S_64=m
+CONFIG_KVM_BOOK3S_64_HV=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 3b260efbfbf9..ca07f9c27335 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -71,9 +71,10 @@ struct device_node;
 
 #define EEH_PE_ISOLATED       (1 << 0)    /* Isolated PE */
 #define EEH_PE_RECOVERING     (1 << 1)    /* Recovering PE */
-#define EEH_PE_RESET          (1 << 2)    /* PE reset in progress */
+#define EEH_PE_CFG_BLOCKED    (1 << 2)    /* Block config access */
 
 #define EEH_PE_KEEP           (1 << 8)    /* Keep PE on hotplug */
+#define EEH_PE_CFG_RESTRICTED (1 << 9)    /* Block config on error */
 
 struct eeh_pe {
     int type;                             /* PE type: PHB/Bus/Device */
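The hunk above replaces the transient EEH_PE_RESET flag with EEH_PE_CFG_BLOCKED and adds a second bit, EEH_PE_CFG_RESTRICTED, for PEs whose config space must always be blocked while they are isolated. The standalone C sketch below is only a model of how the two bits are meant to interact, mirroring the __eeh_pe_state_mark() change further down in this diff; the helper names here are invented and this is not kernel code.

    /* Minimal standalone model of the two EEH config-space flags. */
    #include <stdio.h>

    #define PE_ISOLATED       (1u << 0)
    #define PE_CFG_BLOCKED    (1u << 2)   /* transient: config access blocked right now */
    #define PE_CFG_RESTRICTED (1u << 9)   /* sticky: always block config while isolated */

    static void pe_mark_isolated(unsigned int *state)
    {
        *state |= PE_ISOLATED;
        /* mirrors __eeh_pe_state_mark(): restricted PEs also become blocked */
        if (*state & PE_CFG_RESTRICTED)
            *state |= PE_CFG_BLOCKED;
    }

    static int pe_cfg_allowed(unsigned int state)
    {
        return !(state & PE_CFG_BLOCKED);
    }

    int main(void)
    {
        unsigned int state = PE_CFG_RESTRICTED;  /* e.g. set at probe time */

        pe_mark_isolated(&state);
        printf("config access allowed: %d\n", pe_cfg_allowed(state)); /* prints 0 */
        return 0;
    }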
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 0bb23725b1e7..8bf1b6351716 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -34,7 +34,7 @@
 do { \
     (regs)->result = 0; \
     (regs)->nip = __ip; \
-    (regs)->gpr[1] = *(unsigned long *)__get_SP(); \
+    (regs)->gpr[1] = current_stack_pointer(); \
     asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
 } while (0)
 #endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fe3f9488f321..c998279bd85b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1265,8 +1265,7 @@ static inline unsigned long mfvtb (void)
 
 #define proc_trap() asm volatile("trap")
 
-#define __get_SP() ({unsigned long sp; \
-    asm volatile("mr %0,1": "=r" (sp)); sp;})
+extern unsigned long current_stack_pointer(void);
 
 extern unsigned long scom970_read(unsigned int address);
 extern void scom970_write(unsigned int address, unsigned long value);
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 6fa2708da153..6240698fee9a 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -19,7 +19,7 @@
 
 /* ftrace syscalls requires exporting the sys_call_table */
 #ifdef CONFIG_FTRACE_SYSCALLS
-extern const unsigned long *sys_call_table;
+extern const unsigned long sys_call_table[];
 #endif /* CONFIG_FTRACE_SYSCALLS */
 
 static inline long syscall_get_nr(struct task_struct *task,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index adac9dc54aee..484b2d4462c1 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -53,9 +53,16 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 #else
     struct page *page;
     int node = dev_to_node(dev);
+#ifdef CONFIG_FSL_SOC
     u64 pfn = get_pfn_limit(dev);
     int zone;
 
+    /*
+     * This code should be OK on other platforms, but we have drivers that
+     * don't set coherent_dma_mask. As a workaround we just ifdef it. This
+     * whole routine needs some serious cleanup.
+     */
+
     zone = dma_pfn_limit_to_zone(pfn);
     if (zone < 0) {
         dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
@@ -73,6 +80,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
         break;
 #endif
     };
+#endif /* CONFIG_FSL_SOC */
 
     /* ignore region specifiers */
     flag &= ~(__GFP_HIGHMEM);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index d543e4179c18..2248a1999c64 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -257,6 +257,13 @@ static void *eeh_dump_pe_log(void *data, void *flag)
     struct eeh_dev *edev, *tmp;
     size_t *plen = flag;
 
+    /* If the PE's config space is blocked, 0xFF's will be
+     * returned. It's pointless to collect the log in this
+     * case.
+     */
+    if (pe->state & EEH_PE_CFG_BLOCKED)
+        return NULL;
+
     eeh_pe_for_each_dev(pe, edev, tmp)
         *plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
                                   EEH_PCI_REGS_LOG_LEN - *plen);
@@ -673,18 +680,18 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
     switch (state) {
     case pcie_deassert_reset:
         eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
-        eeh_pe_state_clear(pe, EEH_PE_RESET);
+        eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
         break;
     case pcie_hot_reset:
-        eeh_pe_state_mark(pe, EEH_PE_RESET);
+        eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
         eeh_ops->reset(pe, EEH_RESET_HOT);
         break;
     case pcie_warm_reset:
-        eeh_pe_state_mark(pe, EEH_PE_RESET);
+        eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
         eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
         break;
     default:
-        eeh_pe_state_clear(pe, EEH_PE_RESET);
+        eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
         return -EINVAL;
     };
 
@@ -1523,7 +1530,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option)
     switch (option) {
     case EEH_RESET_DEACTIVATE:
         ret = eeh_ops->reset(pe, option);
-        eeh_pe_state_clear(pe, EEH_PE_RESET);
+        eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
         if (ret)
             break;
 
@@ -1538,7 +1545,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option)
          */
         eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
 
-        eeh_pe_state_mark(pe, EEH_PE_RESET);
+        eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
         ret = eeh_ops->reset(pe, option);
         break;
     default:
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 3fd514f8e4b2..6535936bdf27 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -528,13 +528,13 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
     eeh_pe_dev_traverse(pe, eeh_report_error, &result);
 
     /* Issue reset */
-    eeh_pe_state_mark(pe, EEH_PE_RESET);
+    eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
     ret = eeh_reset_pe(pe);
     if (ret) {
-        eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET);
+        eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED);
         return ret;
     }
-    eeh_pe_state_clear(pe, EEH_PE_RESET);
+    eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
     /* Unfreeze the PE */
     ret = eeh_clear_pe_frozen_state(pe, true);
@@ -601,10 +601,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
      * config accesses. So we prefer to block them. However, controlled
      * PCI config accesses initiated from EEH itself are allowed.
      */
-    eeh_pe_state_mark(pe, EEH_PE_RESET);
+    eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
     rc = eeh_reset_pe(pe);
     if (rc) {
-        eeh_pe_state_clear(pe, EEH_PE_RESET);
+        eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
         return rc;
     }
 
@@ -613,7 +613,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
     /* Restore PE */
     eeh_ops->configure_bridge(pe);
     eeh_pe_restore_bars(pe);
-    eeh_pe_state_clear(pe, EEH_PE_RESET);
+    eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
     /* Clear frozen state */
     rc = eeh_clear_pe_frozen_state(pe, false);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 53dd0915e690..5a63e2b0f65b 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -525,7 +525,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
     pe->state |= state;
 
     /* Offline PCI devices if applicable */
-    if (state != EEH_PE_ISOLATED)
+    if (!(state & EEH_PE_ISOLATED))
         return NULL;
 
     eeh_pe_for_each_dev(pe, edev, tmp) {
@@ -534,6 +534,10 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
         pdev->error_state = pci_channel_io_frozen;
     }
 
+    /* Block PCI config access if required */
+    if (pe->state & EEH_PE_CFG_RESTRICTED)
+        pe->state |= EEH_PE_CFG_BLOCKED;
+
     return NULL;
 }
 
@@ -611,6 +615,10 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
         pdev->error_state = pci_channel_io_normal;
     }
 
+    /* Unblock PCI config access if required */
+    if (pe->state & EEH_PE_CFG_RESTRICTED)
+        pe->state &= ~EEH_PE_CFG_BLOCKED;
+
     return NULL;
 }
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 050f79a4a168..72e783ea0681 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1270,11 +1270,6 @@ hmi_exception_early:
     addi    r3,r1,STACK_FRAME_OVERHEAD
     bl      hmi_exception_realmode
     /* Windup the stack. */
-    /* Clear MSR_RI before setting SRR0 and SRR1. */
-    li      r0,MSR_RI
-    mfmsr   r9              /* get MSR value */
-    andc    r9,r9,r0
-    mtmsrd  r9,1            /* Clear MSR_RI */
     /* Move original HSRR0 and HSRR1 into the respective regs */
     ld      r9,_MSR(r1)
     mtspr   SPRN_HSRR1,r9
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 8eb857f216c1..c14383575fe8 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -466,7 +466,7 @@ static inline void check_stack_overflow(void)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
     long sp;
 
-    sp = __get_SP() & (THREAD_SIZE-1);
+    sp = current_stack_pointer() & (THREAD_SIZE-1);
 
     /* check for stack overflow: is there less than 2KB free? */
     if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 7ce26d45777e..0d432194c018 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -114,3 +114,7 @@ _GLOBAL(longjmp)
     mtlr    r0
     mr      r3,r4
     blr
+
+_GLOBAL(current_stack_pointer)
+    PPC_LL  r3,0(r1)
+    blr
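current_stack_pointer() — declared in reg.h above, implemented here, and exported from ppc_ksyms.c below — replaces the open-coded __get_SP()/"mr %0,1" inline asm with one out-of-line helper: it loads the back-chain word at 0(r1) and returns it. Callers such as check_stack_overflow() in irq.c then mask the value against THREAD_SIZE-1 to see how much of the kernel stack is in use. The standalone sketch below only models that masking arithmetic; THREAD_SIZE and the sample pointer are assumed purely for illustration.

    /* Standalone sketch of the stack-headroom check done in check_stack_overflow();
     * THREAD_SIZE and the sp value are invented for the example. */
    #include <stdio.h>

    #define THREAD_SIZE (16 * 1024UL)   /* assumed stack size */

    int main(void)
    {
        unsigned long sp = 0xc000000001a37e40UL;     /* pretend stack pointer */
        unsigned long left = sp & (THREAD_SIZE - 1); /* offset above the stack base */

        /* check for stack overflow: is there less than 2KB free? */
        if (left < 2048)
            printf("stack nearly exhausted: %lu bytes left\n", left);
        else
            printf("%lu bytes between sp and the stack base\n", left);
        return 0;
    }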
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index c4dfff6c2719..202963ee013a 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -41,3 +41,5 @@ EXPORT_SYMBOL(giveup_spe);
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
+
+EXPORT_SYMBOL(current_stack_pointer);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index aa1df89c8b2a..923cd2daba89 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1545,7 +1545,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
         tsk = current;
     if (sp == 0) {
         if (tsk == current)
-            asm("mr %0,1" : "=r" (sp));
+            sp = current_stack_pointer();
         else
             sp = tsk->thread.ksp;
     }
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index c168337aef9d..7c55b86206b3 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -66,6 +66,11 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
         return PCIBIOS_DEVICE_NOT_FOUND;
     if (!config_access_valid(pdn, where))
         return PCIBIOS_BAD_REGISTER_NUMBER;
+#ifdef CONFIG_EEH
+    if (pdn->edev && pdn->edev->pe &&
+        (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED))
+        return PCIBIOS_SET_FAILED;
+#endif
 
     addr = rtas_config_addr(pdn->busno, pdn->devfn, where);
     buid = pdn->phb->buid;
@@ -90,9 +95,6 @@ static int rtas_pci_read_config(struct pci_bus *bus,
     struct device_node *busdn, *dn;
     struct pci_dn *pdn;
     bool found = false;
-#ifdef CONFIG_EEH
-    struct eeh_dev *edev;
-#endif
     int ret;
 
     /* Search only direct children of the bus */
@@ -109,11 +111,6 @@ static int rtas_pci_read_config(struct pci_bus *bus,
 
     if (!found)
         return PCIBIOS_DEVICE_NOT_FOUND;
-#ifdef CONFIG_EEH
-    edev = of_node_to_eeh_dev(dn);
-    if (edev && edev->pe && edev->pe->state & EEH_PE_RESET)
-        return PCIBIOS_DEVICE_NOT_FOUND;
-#endif
 
     ret = rtas_read_config(pdn, where, size, val);
     if (*val == EEH_IO_ERROR_VALUE(size) &&
@@ -132,6 +129,11 @@ int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
         return PCIBIOS_DEVICE_NOT_FOUND;
     if (!config_access_valid(pdn, where))
         return PCIBIOS_BAD_REGISTER_NUMBER;
+#ifdef CONFIG_EEH
+    if (pdn->edev && pdn->edev->pe &&
+        (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED))
+        return PCIBIOS_SET_FAILED;
+#endif
 
     addr = rtas_config_addr(pdn->busno, pdn->devfn, where);
     buid = pdn->phb->buid;
@@ -155,10 +157,6 @@ static int rtas_pci_write_config(struct pci_bus *bus,
     struct device_node *busdn, *dn;
     struct pci_dn *pdn;
     bool found = false;
-#ifdef CONFIG_EEH
-    struct eeh_dev *edev;
-#endif
-    int ret;
 
     /* Search only direct children of the bus */
     busdn = pci_bus_to_OF_node(bus);
@@ -173,14 +171,8 @@ static int rtas_pci_write_config(struct pci_bus *bus,
 
     if (!found)
         return PCIBIOS_DEVICE_NOT_FOUND;
-#ifdef CONFIG_EEH
-    edev = of_node_to_eeh_dev(dn);
-    if (edev && edev->pe && (edev->pe->state & EEH_PE_RESET))
-        return PCIBIOS_DEVICE_NOT_FOUND;
-#endif
-    ret = rtas_write_config(pdn, where, size, val);
 
-    return ret;
+    return rtas_write_config(pdn, where, size, val);
 }
 
 static struct pci_ops rtas_pci_ops = {
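Both rtas_read_config() and rtas_write_config() now fail early with PCIBIOS_SET_FAILED while the owning PE has EEH_PE_CFG_BLOCKED set, replacing the per-caller EEH_PE_RESET checks removed above; the powernv wrappers added further down in this diff do the same and additionally return all-ones read data, which is what eeh_dump_pe_log() short-circuits on. The snippet below is only a standalone model of that guarded-accessor pattern; the names and return codes are invented.

    /* Standalone model of a guarded config-space read; cfg_read32(), CFG_FAILED
     * and the register value are invented for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    #define CFG_FAILED (-1)

    struct pe { unsigned int state; };
    #define PE_CFG_BLOCKED (1u << 2)

    static int cfg_read32(const struct pe *pe, uint32_t *val)
    {
        if (pe->state & PE_CFG_BLOCKED) {
            *val = 0xFFFFFFFF;   /* look like a device that does not answer */
            return CFG_FAILED;
        }
        *val = 0x12345678;       /* pretend hardware register contents */
        return 0;
    }

    int main(void)
    {
        struct pe pe = { .state = PE_CFG_BLOCKED };
        uint32_t val;
        int rc = cfg_read32(&pe, &val);

        printf("rc=%d val=0x%08x\n", rc, val);
        return 0;
    }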
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index cd07d79ad21c..4f3cfe1b6a33 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -522,36 +522,36 @@ void __init setup_system(void)
     smp_release_cpus();
 #endif
 
-    printk("Starting Linux PPC64 %s\n", init_utsname()->version);
+    pr_info("Starting Linux PPC64 %s\n", init_utsname()->version);
 
-    printk("-----------------------------------------------------\n");
-    printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
-    printk("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
+    pr_info("-----------------------------------------------------\n");
+    pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+    pr_info("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
 
     if (ppc64_caches.dline_size != 0x80)
-        printk("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
+        pr_info("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
     if (ppc64_caches.iline_size != 0x80)
-        printk("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
+        pr_info("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
 
-    printk("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
-    printk("  possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
-    printk("  always = 0x%016lx\n", CPU_FTRS_ALWAYS);
-    printk("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
+    pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
+    pr_info("  possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
+    pr_info("  always = 0x%016lx\n", CPU_FTRS_ALWAYS);
+    pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
         cur_cpu_spec->cpu_user_features2);
-    printk("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
-    printk("firmware_features = 0x%016lx\n", powerpc_firmware_features);
+    pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
+    pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
 
 #ifdef CONFIG_PPC_STD_MMU_64
     if (htab_address)
-        printk("htab_address = 0x%p\n", htab_address);
+        pr_info("htab_address = 0x%p\n", htab_address);
 
-    printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+    pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
 #endif
 
     if (PHYSICAL_START > 0)
-        printk("physical_start = 0x%llx\n",
+        pr_info("physical_start = 0x%llx\n",
             (unsigned long long)PHYSICAL_START);
-    printk("-----------------------------------------------------\n");
+    pr_info("-----------------------------------------------------\n");
 
     DBG(" <- setup_system()\n");
 }
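The setup_system() banner is converted from bare printk() calls to pr_info(). pr_info() formats its message through pr_fmt(), so a file can prepend a subsystem prefix to every message with a single define, as mm/numa.c does below with "#define pr_fmt(fmt) \"numa: \" fmt". A minimal userspace model of that mechanism is sketched here, with printf standing in for printk.

    /* Userspace model of pr_fmt()/pr_info() prefixing; printf stands in for printk. */
    #include <stdio.h>

    #define pr_fmt(fmt) "numa: " fmt                       /* per-file prefix */
    #define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
        pr_info("Disabling topology updates\n");   /* prints "numa: Disabling topology updates" */
        return 0;
    }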
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 3d30ef1038e5..ea43a347a104 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -50,7 +50,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
     unsigned long sp;
 
-    asm("mr %0,1" : "=r" (sp));
+    sp = current_stack_pointer();
 
     save_context_stack(trace, sp, current, 1);
 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 649666d5d1c2..e5236c24dc07 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -8,6 +8,8 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#define pr_fmt(fmt) "numa: " fmt
+
 #include <linux/threads.h>
 #include <linux/bootmem.h>
 #include <linux/init.h>
@@ -1153,6 +1155,22 @@ static int __init early_numa(char *p)
 }
 early_param("numa", early_numa);
 
+static bool topology_updates_enabled = true;
+
+static int __init early_topology_updates(char *p)
+{
+    if (!p)
+        return 0;
+
+    if (!strcmp(p, "off")) {
+        pr_info("Disabling topology updates\n");
+        topology_updates_enabled = false;
+    }
+
+    return 0;
+}
+early_param("topology_updates", early_topology_updates);
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * Find the node associated with a hot added memory section for
@@ -1442,8 +1460,11 @@ static long hcall_vphn(unsigned long cpu, __be32 *associativity)
     long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
     u64 flags = 1;
     int hwcpu = get_hard_smp_processor_id(cpu);
+    int i;
 
     rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
+    for (i = 0; i < 6; i++)
+        retbuf[i] = cpu_to_be64(retbuf[i]);
     vphn_unpack_associativity(retbuf, associativity);
 
     return rc;
@@ -1539,6 +1560,9 @@ int arch_update_cpu_topology(void)
     struct device *dev;
     int weight, new_nid, i = 0;
 
+    if (!prrn_enabled && !vphn_enabled)
+        return 0;
+
     weight = cpumask_weight(&cpu_associativity_changes_mask);
     if (!weight)
         return 0;
@@ -1592,6 +1616,15 @@ int arch_update_cpu_topology(void)
         cpu = cpu_last_thread_sibling(cpu);
     }
 
+    pr_debug("Topology update for the following CPUs:\n");
+    if (cpumask_weight(&updated_cpus)) {
+        for (ud = &updates[0]; ud; ud = ud->next) {
+            pr_debug("cpu %d moving from node %d "
+                      "to %d\n", ud->cpu,
+                      ud->old_nid, ud->new_nid);
+        }
+    }
+
     /*
      * In cases where we have nothing to update (because the updates list
      * is too short or because the new topology is same as the old one),
@@ -1800,8 +1833,12 @@ static const struct file_operations topology_ops = {
 
 static int topology_update_init(void)
 {
-    start_topology_update();
-    proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);
+    /* Do not poll for changes if disabled at boot */
+    if (topology_updates_enabled)
+        start_topology_update();
+
+    if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
+        return -ENOMEM;
 
     return 0;
 }
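Two behavioural notes on the numa.c changes: topology-update polling can now be turned off from the kernel command line (boot with topology_updates=off, per the early_param added above), and hcall_vphn() byte-swaps each register returned by plpar_hcall9() with cpu_to_be64() before unpacking, so that on a little-endian kernel the packed associativity data is walked in the big-endian byte order it was produced in (on big-endian the swap is a no-op). Below is a standalone sketch of that swap only, with htobe64() standing in for cpu_to_be64() and an invented register value.

    /* Standalone sketch of the cpu_to_be64() fixup in hcall_vphn(); htobe64()
     * plays the role of cpu_to_be64() and the register value is made up. */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t reg = 0x1111222233334444ULL; /* value as returned in a CPU register */
        uint64_t be = htobe64(reg);           /* no-op on a big-endian host */
        const unsigned char *p = (const unsigned char *)&be;

        /* The unpacker can now read the data most-significant byte first,
         * regardless of the endianness the kernel was built for. */
        for (int i = 0; i < 8; i++)
            printf("%02x ", p[i]);
        printf("\n");
        return 0;
    }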
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 426814a2ede3..eba9cb10619c 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -373,7 +373,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
      * moving forward, we have to return operational
      * state during PE reset.
      */
-    if (pe->state & EEH_PE_RESET) {
+    if (pe->state & EEH_PE_CFG_BLOCKED) {
         result = (EEH_STATE_MMIO_ACTIVE |
                   EEH_STATE_DMA_ACTIVE |
                   EEH_STATE_MMIO_ENABLED |
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 3e89cbf55885..1d19e7917d7f 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -169,6 +169,26 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
     }
 
     /*
+     * If the PE contains any one of following adapters, the
+     * PCI config space can't be accessed when dumping EEH log.
+     * Otherwise, we will run into fenced PHB caused by shortage
+     * of outbound credits in the adapter. The PCI config access
+     * should be blocked until PE reset. MMIO access is dropped
+     * by hardware certainly. In order to drop PCI config requests,
+     * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
+     * will be checked in the backend for PE state retrival. If
+     * the PE becomes frozen for the first time and the flag has
+     * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
+     * that PE to block its config space.
+     *
+     * Broadcom Austin 4-ports NICs (14e4:1657)
+     * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
+     */
+    if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
+        (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
+        edev->pe->state |= EEH_PE_CFG_RESTRICTED;
+
+    /*
      * Cache the PE primary bus, which can't be fetched when
      * full hotplug is in progress. In that case, all child
      * PCI devices of the PE are expected to be removed prior
@@ -383,6 +403,39 @@ static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
     return ret;
 }
 
+static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
+{
+    struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+
+    if (!edev || !edev->pe)
+        return false;
+
+    if (edev->pe->state & EEH_PE_CFG_BLOCKED)
+        return true;
+
+    return false;
+}
+
+static int powernv_eeh_read_config(struct device_node *dn,
+                                   int where, int size, u32 *val)
+{
+    if (powernv_eeh_cfg_blocked(dn)) {
+        *val = 0xFFFFFFFF;
+        return PCIBIOS_SET_FAILED;
+    }
+
+    return pnv_pci_cfg_read(dn, where, size, val);
+}
+
+static int powernv_eeh_write_config(struct device_node *dn,
+                                    int where, int size, u32 val)
+{
+    if (powernv_eeh_cfg_blocked(dn))
+        return PCIBIOS_SET_FAILED;
+
+    return pnv_pci_cfg_write(dn, where, size, val);
+}
+
 /**
  * powernv_eeh_next_error - Retrieve next EEH error to handle
  * @pe: Affected PE
@@ -440,8 +493,8 @@ static struct eeh_ops powernv_eeh_ops = {
     .get_log                = powernv_eeh_get_log,
     .configure_bridge       = powernv_eeh_configure_bridge,
     .err_inject             = powernv_eeh_err_inject,
-    .read_config            = pnv_pci_cfg_read,
-    .write_config           = pnv_pci_cfg_write,
+    .read_config            = powernv_eeh_read_config,
+    .write_config           = powernv_eeh_write_config,
     .next_error             = powernv_eeh_next_error,
     .restore_config         = powernv_eeh_restore_config
 };
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index b642b0562f5a..d019b081df9d 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -194,6 +194,27 @@ static int __init opal_register_exception_handlers(void)
      * fwnmi area at 0x7000 to provide the glue space to OPAL
      */
     glue = 0x7000;
+
+    /*
+     * Check if we are running on newer firmware that exports
+     * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
+     * the HMI interrupt and we catch it directly in Linux.
+     *
+     * For older firmware (i.e currently released POWER8 System Firmware
+     * as of today <= SV810_087), we fallback to old behavior and let OPAL
+     * patch the HMI vector and handle it inside OPAL firmware.
+     *
+     * For newer firmware (in development/yet to be released) we will
+     * start catching/handling HMI directly in Linux.
+     */
+    if (!opal_check_token(OPAL_HANDLE_HMI)) {
+        pr_info("opal: Old firmware detected, OPAL handles HMIs.\n");
+        opal_register_exception_handler(
+                OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
+                0, glue);
+        glue += 128;
+    }
+
     opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
 #endif
 
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b3ca77ddf36d..b2187d0068b8 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -505,7 +505,7 @@ static bool pnv_pci_cfg_check(struct pci_controller *hose,
     edev = of_node_to_eeh_dev(dn);
     if (edev) {
         if (edev->pe &&
-            (edev->pe->state & EEH_PE_RESET))
+            (edev->pe->state & EEH_PE_CFG_BLOCKED))
             return false;
 
         if (edev->mode & EEH_DEV_REMOVED)
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index fdf01b660d59..6ad83bd11fe2 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -25,11 +25,11 @@
 #include <asm/rtas.h>
 
 struct cc_workarea {
-    u32 drc_index;
-    u32 zero;
-    u32 name_offset;
-    u32 prop_length;
-    u32 prop_offset;
+    __be32 drc_index;
+    __be32 zero;
+    __be32 name_offset;
+    __be32 prop_length;
+    __be32 prop_offset;
 };
 
 void dlpar_free_cc_property(struct property *prop)
@@ -49,11 +49,11 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
     if (!prop)
         return NULL;
 
-    name = (char *)ccwa + ccwa->name_offset;
+    name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
     prop->name = kstrdup(name, GFP_KERNEL);
 
-    prop->length = ccwa->prop_length;
-    value = (char *)ccwa + ccwa->prop_offset;
+    prop->length = be32_to_cpu(ccwa->prop_length);
+    value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
     prop->value = kmemdup(value, prop->length, GFP_KERNEL);
     if (!prop->value) {
         dlpar_free_cc_property(prop);
@@ -79,7 +79,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
     if (!dn)
         return NULL;
 
-    name = (char *)ccwa + ccwa->name_offset;
+    name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
     dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
     if (!dn->full_name) {
         kfree(dn);
@@ -126,7 +126,7 @@ void dlpar_free_cc_nodes(struct device_node *dn)
 #define CALL_AGAIN -2
 #define ERR_CFG_USE -9003
 
-struct device_node *dlpar_configure_connector(u32 drc_index,
+struct device_node *dlpar_configure_connector(__be32 drc_index,
                                               struct device_node *parent)
 {
     struct device_node *dn;
@@ -414,7 +414,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
     if (!parent)
         return -ENODEV;
 
-    dn = dlpar_configure_connector(drc_index, parent);
+    dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
     if (!dn)
         return -EINVAL;
 
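The cc_workarea buffer is filled in by the ibm,configure-connector RTAS call, whose data is big-endian, so its fields are now declared __be32 and converted with be32_to_cpu() exactly where they are consumed, while the drc_index handed to dlpar_configure_connector() is converted the other way with cpu_to_be32(). A standalone sketch of the same convention, using be32toh()/htobe32() and an invented structure:

    /* Standalone sketch of the __be32 handling pattern above; the struct and
     * values are invented, be32toh()/htobe32() stand in for be32_to_cpu()/
     * cpu_to_be32(). */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fw_workarea {              /* fields arrive in big-endian order */
        uint32_t name_offset;
        uint32_t prop_length;
    };

    int main(void)
    {
        /* pretend this came back from firmware */
        struct fw_workarea wa = { .name_offset = htobe32(24),
                                  .prop_length = htobe32(8) };

        /* convert on use, exactly once, at the CPU boundary */
        printf("name at offset %u, property %u bytes\n",
               be32toh(wa.name_offset), be32toh(wa.prop_length));
        return 0;
    }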
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index b174fa751d26..5c375f93c669 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -247,7 +247,7 @@ static int pseries_add_processor(struct device_node *np)
     unsigned int cpu;
     cpumask_var_t candidate_mask, tmp;
     int err = -ENOSPC, len, nthreads, i;
-    const u32 *intserv;
+    const __be32 *intserv;
 
     intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
     if (!intserv)
@@ -293,7 +293,7 @@ static int pseries_add_processor(struct device_node *np)
     for_each_cpu(cpu, tmp) {
         BUG_ON(cpu_present(cpu));
         set_cpu_present(cpu, true);
-        set_hard_smp_processor_id(cpu, *intserv++);
+        set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
     }
     err = 0;
 out_unlock:
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index de1ec54a2a57..e32e00976a94 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -30,7 +30,6 @@
 #include <linux/mm.h>
 #include <linux/memblock.h>
 #include <linux/spinlock.h>
-#include <linux/sched.h>	/* for show_stack */
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
@@ -168,7 +167,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
             printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
             printk("\ttcenum = 0x%llx\n", (u64)tcenum);
             printk("\ttce val = 0x%llx\n", tce );
-            show_stack(current, (unsigned long *)__get_SP());
+            dump_stack();
         }
 
         tcenum++;
@@ -257,7 +256,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
         printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
         printk("\tnpages = 0x%llx\n", (u64)npages);
         printk("\ttce[0] val = 0x%llx\n", tcep[0]);
-        show_stack(current, (unsigned long *)__get_SP());
+        dump_stack();
     }
     return ret;
 }
@@ -273,7 +272,7 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
         printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
         printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
         printk("\ttcenum = 0x%llx\n", (u64)tcenum);
-        show_stack(current, (unsigned long *)__get_SP());
+        dump_stack();
     }
 
     tcenum++;
@@ -292,7 +291,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
         printk("\trc = %lld\n", rc);
         printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
         printk("\tnpages = 0x%llx\n", (u64)npages);
-        show_stack(current, (unsigned long *)__get_SP());
+        dump_stack();
     }
 }
 
@@ -307,7 +306,7 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
         printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
         printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
         printk("\ttcenum = 0x%llx\n", (u64)tcenum);
-        show_stack(current, (unsigned long *)__get_SP());
+        dump_stack();
     }
 
     return tce_ret;
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 361add62abf1..1796c5438cc6 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -56,7 +56,8 @@ extern void hvc_vio_init_early(void);
 /* Dynamic logical Partitioning/Mobility */
 extern void dlpar_free_cc_nodes(struct device_node *);
 extern void dlpar_free_cc_property(struct property *);
-extern struct device_node *dlpar_configure_connector(u32, struct device_node *);
+extern struct device_node *dlpar_configure_connector(__be32,
+                                                     struct device_node *);
 extern int dlpar_attach_node(struct device_node *);
 extern int dlpar_detach_node(struct device_node *);
 
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 0c75214b6f92..73b64c73505b 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -145,59 +145,64 @@ void msi_bitmap_free(struct msi_bitmap *bmp)
 
 #ifdef CONFIG_MSI_BITMAP_SELFTEST
 
-#define check(x) \
-    if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__);
-
 static void __init test_basics(void)
 {
     struct msi_bitmap bmp;
-    int i, size = 512;
+    int rc, i, size = 512;
 
     /* Can't allocate a bitmap of 0 irqs */
-    check(msi_bitmap_alloc(&bmp, 0, NULL) != 0);
+    WARN_ON(msi_bitmap_alloc(&bmp, 0, NULL) == 0);
 
     /* of_node may be NULL */
-    check(0 == msi_bitmap_alloc(&bmp, size, NULL));
+    WARN_ON(msi_bitmap_alloc(&bmp, size, NULL));
 
     /* Should all be free by default */
-    check(0 == bitmap_find_free_region(bmp.bitmap, size,
-                                       get_count_order(size)));
+    WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
     bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
 
     /* With no node, there's no msi-available-ranges, so expect > 0 */
-    check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0);
+    WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);
 
     /* Should all still be free */
-    check(0 == bitmap_find_free_region(bmp.bitmap, size,
-                                       get_count_order(size)));
+    WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
    bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
 
     /* Check we can fill it up and then no more */
     for (i = 0; i < size; i++)
-        check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);
+        WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);
 
-    check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);
+    WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);
 
     /* Should all be allocated */
-    check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0);
+    WARN_ON(bitmap_find_free_region(bmp.bitmap, size, 0) >= 0);
 
     /* And if we free one we can then allocate another */
     msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
-    check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2);
+    WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) != size / 2);
+
+    /* Free most of them for the alignment tests */
+    msi_bitmap_free_hwirqs(&bmp, 3, size - 3);
 
     /* Check we get a naturally aligned offset */
-    check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0);
-    check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0);
-    check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0);
-    check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0);
-    check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0);
-    check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0);
-    check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0);
+    rc = msi_bitmap_alloc_hwirqs(&bmp, 2);
+    WARN_ON(rc < 0 && rc % 2 != 0);
+    rc = msi_bitmap_alloc_hwirqs(&bmp, 4);
+    WARN_ON(rc < 0 && rc % 4 != 0);
+    rc = msi_bitmap_alloc_hwirqs(&bmp, 8);
+    WARN_ON(rc < 0 && rc % 8 != 0);
+    rc = msi_bitmap_alloc_hwirqs(&bmp, 9);
+    WARN_ON(rc < 0 && rc % 16 != 0);
+    rc = msi_bitmap_alloc_hwirqs(&bmp, 3);
+    WARN_ON(rc < 0 && rc % 4 != 0);
+    rc = msi_bitmap_alloc_hwirqs(&bmp, 7);
+    WARN_ON(rc < 0 && rc % 8 != 0);
+    rc = msi_bitmap_alloc_hwirqs(&bmp, 121);
+    WARN_ON(rc < 0 && rc % 128 != 0);
 
     msi_bitmap_free(&bmp);
 
-    /* Clients may check bitmap == NULL for "not-allocated" */
-    check(bmp.bitmap == NULL);
+    /* Clients may WARN_ON bitmap == NULL for "not-allocated" */
+    WARN_ON(bmp.bitmap != NULL);
 
     kfree(bmp.bitmap);
 }
@@ -219,14 +224,13 @@ static void __init test_of_node(void)
     of_node_init(&of_node);
     of_node.full_name = node_name;
 
-    check(0 == msi_bitmap_alloc(&bmp, size, &of_node));
+    WARN_ON(msi_bitmap_alloc(&bmp, size, &of_node));
 
     /* No msi-available-ranges, so expect > 0 */
-    check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0);
+    WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);
 
     /* Should all still be free */
-    check(0 == bitmap_find_free_region(bmp.bitmap, size,
-                                       get_count_order(size)));
+    WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
     bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
 
     /* Now create a fake msi-available-ranges property */
@@ -240,11 +244,11 @@ static void __init test_of_node(void)
     of_node.properties = &prop;
 
     /* msi-available-ranges, so expect == 0 */
-    check(msi_bitmap_reserve_dt_hwirqs(&bmp) == 0);
+    WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp));
 
     /* Check we got the expected result */
-    check(0 == bitmap_parselist(expected_str, expected, size));
-    check(bitmap_equal(expected, bmp.bitmap, size));
+    WARN_ON(bitmap_parselist(expected_str, expected, size));
+    WARN_ON(!bitmap_equal(expected, bmp.bitmap, size));
 
     msi_bitmap_free(&bmp);
     kfree(bmp.bitmap);