Diffstat (limited to 'arch/powerpc')
49 files changed, 735 insertions, 360 deletions
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 84fdf6857c31..a613d2c82fd9 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h | |||
| @@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len) | |||
| 200 | 200 | ||
| 201 | /* | 201 | /* |
| 202 | * We can't access below the stack pointer in the 32bit ABI and | 202 | * We can't access below the stack pointer in the 32bit ABI and |
| 203 | * can access 288 bytes in the 64bit ABI | 203 | * can access 288 bytes in the 64bit big-endian ABI, |
| 204 | * or 512 bytes with the new ELFv2 little-endian ABI. | ||
| 204 | */ | 205 | */ |
| 205 | if (!is_32bit_task()) | 206 | if (!is_32bit_task()) |
| 206 | usp -= 288; | 207 | usp -= USER_REDZONE_SIZE; |
| 207 | 208 | ||
| 208 | return (void __user *) (usp - len); | 209 | return (void __user *) (usp - len); |
| 209 | } | 210 | } |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e27e9ad6818e..150866b2a3fe 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
| @@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask) | |||
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | extern int dma_set_mask(struct device *dev, u64 dma_mask); | 136 | extern int dma_set_mask(struct device *dev, u64 dma_mask); |
| 137 | extern int __dma_set_mask(struct device *dev, u64 dma_mask); | ||
| 137 | 138 | ||
| 138 | #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) | 139 | #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) |
| 139 | 140 | ||
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 9e39ceb1d19f..d4dd41fb951b 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h | |||
| @@ -172,10 +172,20 @@ struct eeh_ops { | |||
| 172 | }; | 172 | }; |
| 173 | 173 | ||
| 174 | extern struct eeh_ops *eeh_ops; | 174 | extern struct eeh_ops *eeh_ops; |
| 175 | extern int eeh_subsystem_enabled; | 175 | extern bool eeh_subsystem_enabled; |
| 176 | extern raw_spinlock_t confirm_error_lock; | 176 | extern raw_spinlock_t confirm_error_lock; |
| 177 | extern int eeh_probe_mode; | 177 | extern int eeh_probe_mode; |
| 178 | 178 | ||
| 179 | static inline bool eeh_enabled(void) | ||
| 180 | { | ||
| 181 | return eeh_subsystem_enabled; | ||
| 182 | } | ||
| 183 | |||
| 184 | static inline void eeh_set_enable(bool mode) | ||
| 185 | { | ||
| 186 | eeh_subsystem_enabled = mode; | ||
| 187 | } | ||
| 188 | |||
| 179 | #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ | 189 | #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ |
| 180 | #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ | 190 | #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ |
| 181 | 191 | ||
| @@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *); | |||
| 246 | * If this macro yields TRUE, the caller relays to eeh_check_failure() | 256 | * If this macro yields TRUE, the caller relays to eeh_check_failure() |
| 247 | * which does further tests out of line. | 257 | * which does further tests out of line. |
| 248 | */ | 258 | */ |
| 249 | #define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled) | 259 | #define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled()) |
| 250 | 260 | ||
| 251 | /* | 261 | /* |
| 252 | * Reads from a device which has been isolated by EEH will return | 262 | * Reads from a device which has been isolated by EEH will return |
| @@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *); | |||
| 257 | 267 | ||
| 258 | #else /* !CONFIG_EEH */ | 268 | #else /* !CONFIG_EEH */ |
| 259 | 269 | ||
| 270 | static inline bool eeh_enabled(void) | ||
| 271 | { | ||
| 272 | return false; | ||
| 273 | } | ||
| 274 | |||
| 275 | static inline void eeh_set_enable(bool mode) { } | ||
| 276 | |||
| 260 | static inline int eeh_init(void) | 277 | static inline int eeh_init(void) |
| 261 | { | 278 | { |
| 262 | return 0; | 279 | return 0; |
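
For reference, a minimal sketch (not part of this patch) of how callers are expected to use the new accessors instead of touching eeh_subsystem_enabled directly; the function names below are illustrative only:

/* Platform probe code flips the flag on once a capable adapter is found. */
static void example_platform_eeh_probe(void)
{
	/* ... detect an EEH-capable PHB ... */
	eeh_set_enable(true);		/* was: eeh_subsystem_enabled = 1 */
}

/* Generic I/O error paths test it through the helper, which compiles to a
 * constant false when CONFIG_EEH is not set. */
static int example_mmio_error_path(struct eeh_dev *edev)
{
	if (!eeh_enabled())		/* was: !eeh_subsystem_enabled */
		return 0;
	return eeh_dev_check_failure(edev);
}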
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index d750336b171d..623f2971ce0e 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h | |||
| @@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | |||
| 127 | unsigned long addr, pte_t *ptep) | 127 | unsigned long addr, pte_t *ptep) |
| 128 | { | 128 | { |
| 129 | #ifdef CONFIG_PPC64 | 129 | #ifdef CONFIG_PPC64 |
| 130 | return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); | 130 | return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); |
| 131 | #else | 131 | #else |
| 132 | return __pte(pte_update(ptep, ~0UL, 0)); | 132 | return __pte(pte_update(ptep, ~0UL, 0)); |
| 133 | #endif | 133 | #endif |
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f7a8036579b5..42632c7a2a4e 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h | |||
| @@ -77,6 +77,7 @@ struct iommu_table { | |||
| 77 | #ifdef CONFIG_IOMMU_API | 77 | #ifdef CONFIG_IOMMU_API |
| 78 | struct iommu_group *it_group; | 78 | struct iommu_group *it_group; |
| 79 | #endif | 79 | #endif |
| 80 | void (*set_bypass)(struct iommu_table *tbl, bool enable); | ||
| 80 | }; | 81 | }; |
| 81 | 82 | ||
| 82 | /* Pure 2^n version of get_order */ | 83 | /* Pure 2^n version of get_order */ |
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 40157e2ca691..ed82142a3251 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
| @@ -816,8 +816,8 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, | |||
| 816 | int64_t opal_pci_poll(uint64_t phb_id); | 816 | int64_t opal_pci_poll(uint64_t phb_id); |
| 817 | int64_t opal_return_cpu(void); | 817 | int64_t opal_return_cpu(void); |
| 818 | 818 | ||
| 819 | int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); | 819 | int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); |
| 820 | int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); | 820 | int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val); |
| 821 | 821 | ||
| 822 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 822 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
| 823 | uint32_t addr, uint32_t data, uint32_t sz); | 823 | uint32_t addr, uint32_t data, uint32_t sz); |
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index bc141c950b1e..eb9261024f51 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h | |||
| @@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
| 195 | static inline unsigned long pte_update(struct mm_struct *mm, | 195 | static inline unsigned long pte_update(struct mm_struct *mm, |
| 196 | unsigned long addr, | 196 | unsigned long addr, |
| 197 | pte_t *ptep, unsigned long clr, | 197 | pte_t *ptep, unsigned long clr, |
| 198 | unsigned long set, | ||
| 198 | int huge) | 199 | int huge) |
| 199 | { | 200 | { |
| 200 | #ifdef PTE_ATOMIC_UPDATES | 201 | #ifdef PTE_ATOMIC_UPDATES |
| @@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm, | |||
| 205 | andi. %1,%0,%6\n\ | 206 | andi. %1,%0,%6\n\ |
| 206 | bne- 1b \n\ | 207 | bne- 1b \n\ |
| 207 | andc %1,%0,%4 \n\ | 208 | andc %1,%0,%4 \n\ |
| 209 | or %1,%1,%7\n\ | ||
| 208 | stdcx. %1,0,%3 \n\ | 210 | stdcx. %1,0,%3 \n\ |
| 209 | bne- 1b" | 211 | bne- 1b" |
| 210 | : "=&r" (old), "=&r" (tmp), "=m" (*ptep) | 212 | : "=&r" (old), "=&r" (tmp), "=m" (*ptep) |
| 211 | : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) | 213 | : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) |
| 212 | : "cc" ); | 214 | : "cc" ); |
| 213 | #else | 215 | #else |
| 214 | unsigned long old = pte_val(*ptep); | 216 | unsigned long old = pte_val(*ptep); |
| 215 | *ptep = __pte(old & ~clr); | 217 | *ptep = __pte((old & ~clr) | set); |
| 216 | #endif | 218 | #endif |
| 217 | /* huge pages use the old page table lock */ | 219 | /* huge pages use the old page table lock */ |
| 218 | if (!huge) | 220 | if (!huge) |
| @@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, | |||
| 231 | { | 233 | { |
| 232 | unsigned long old; | 234 | unsigned long old; |
| 233 | 235 | ||
| 234 | if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) | 236 | if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) |
| 235 | return 0; | 237 | return 0; |
| 236 | old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); | 238 | old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); |
| 237 | return (old & _PAGE_ACCESSED) != 0; | 239 | return (old & _PAGE_ACCESSED) != 0; |
| 238 | } | 240 | } |
| 239 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 241 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
| @@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
| 252 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | 254 | if ((pte_val(*ptep) & _PAGE_RW) == 0) |
| 253 | return; | 255 | return; |
| 254 | 256 | ||
| 255 | pte_update(mm, addr, ptep, _PAGE_RW, 0); | 257 | pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); |
| 256 | } | 258 | } |
| 257 | 259 | ||
| 258 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | 260 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, |
| @@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | |||
| 261 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | 263 | if ((pte_val(*ptep) & _PAGE_RW) == 0) |
| 262 | return; | 264 | return; |
| 263 | 265 | ||
| 264 | pte_update(mm, addr, ptep, _PAGE_RW, 1); | 266 | pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); |
| 265 | } | 267 | } |
| 266 | 268 | ||
| 267 | /* | 269 | /* |
| @@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | |||
| 284 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | 286 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
| 285 | unsigned long addr, pte_t *ptep) | 287 | unsigned long addr, pte_t *ptep) |
| 286 | { | 288 | { |
| 287 | unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); | 289 | unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); |
| 288 | return __pte(old); | 290 | return __pte(old); |
| 289 | } | 291 | } |
| 290 | 292 | ||
| 291 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | 293 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
| 292 | pte_t * ptep) | 294 | pte_t * ptep) |
| 293 | { | 295 | { |
| 294 | pte_update(mm, addr, ptep, ~0UL, 0); | 296 | pte_update(mm, addr, ptep, ~0UL, 0, 0); |
| 295 | } | 297 | } |
| 296 | 298 | ||
| 297 | 299 | ||
| @@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma, | |||
| 506 | 508 | ||
| 507 | extern unsigned long pmd_hugepage_update(struct mm_struct *mm, | 509 | extern unsigned long pmd_hugepage_update(struct mm_struct *mm, |
| 508 | unsigned long addr, | 510 | unsigned long addr, |
| 509 | pmd_t *pmdp, unsigned long clr); | 511 | pmd_t *pmdp, |
| 512 | unsigned long clr, | ||
| 513 | unsigned long set); | ||
| 510 | 514 | ||
| 511 | static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, | 515 | static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, |
| 512 | unsigned long addr, pmd_t *pmdp) | 516 | unsigned long addr, pmd_t *pmdp) |
| @@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, | |||
| 515 | 519 | ||
| 516 | if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) | 520 | if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) |
| 517 | return 0; | 521 | return 0; |
| 518 | old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); | 522 | old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); |
| 519 | return ((old & _PAGE_ACCESSED) != 0); | 523 | return ((old & _PAGE_ACCESSED) != 0); |
| 520 | } | 524 | } |
| 521 | 525 | ||
| @@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
| 542 | if ((pmd_val(*pmdp) & _PAGE_RW) == 0) | 546 | if ((pmd_val(*pmdp) & _PAGE_RW) == 0) |
| 543 | return; | 547 | return; |
| 544 | 548 | ||
| 545 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); | 549 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); |
| 546 | } | 550 | } |
| 547 | 551 | ||
| 548 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH | 552 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH |
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index f83b6f3e1b39..3ebb188c3ff5 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
| @@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte) | |||
| 75 | return pte; | 75 | return pte; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | #define ptep_set_numa ptep_set_numa | ||
| 79 | static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, | ||
| 80 | pte_t *ptep) | ||
| 81 | { | ||
| 82 | if ((pte_val(*ptep) & _PAGE_PRESENT) == 0) | ||
| 83 | VM_BUG_ON(1); | ||
| 84 | |||
| 85 | pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0); | ||
| 86 | return; | ||
| 87 | } | ||
| 88 | |||
| 78 | #define pmd_numa pmd_numa | 89 | #define pmd_numa pmd_numa |
| 79 | static inline int pmd_numa(pmd_t pmd) | 90 | static inline int pmd_numa(pmd_t pmd) |
| 80 | { | 91 | { |
| 81 | return pte_numa(pmd_pte(pmd)); | 92 | return pte_numa(pmd_pte(pmd)); |
| 82 | } | 93 | } |
| 83 | 94 | ||
| 95 | #define pmdp_set_numa pmdp_set_numa | ||
| 96 | static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, | ||
| 97 | pmd_t *pmdp) | ||
| 98 | { | ||
| 99 | if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0) | ||
| 100 | VM_BUG_ON(1); | ||
| 101 | |||
| 102 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA); | ||
| 103 | return; | ||
| 104 | } | ||
| 105 | |||
| 84 | #define pmd_mknonnuma pmd_mknonnuma | 106 | #define pmd_mknonnuma pmd_mknonnuma |
| 85 | static inline pmd_t pmd_mknonnuma(pmd_t pmd) | 107 | static inline pmd_t pmd_mknonnuma(pmd_t pmd) |
| 86 | { | 108 | { |
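
Taken together with the pgtable-ppc64.h hunk above, the extra "set" argument lets a single locked update both clear and set bits, which is what the new NUMA helpers rely on. A short sketch of the two calling styles (illustrative wrapper, not part of the patch):

static void example_pte_update_callers(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, pmd_t *pmdp)
{
	/* Pre-existing callers keep their behaviour by passing set = 0: */
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);	/* write-protect */

	/* The NUMA-hinting helpers clear one bit and set another in the same
	 * ldarx/stdcx. sequence, so no intermediate state is ever visible: */
	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
}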
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index becc08e6a65c..279b80f3bb29 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h | |||
| @@ -28,11 +28,23 @@ | |||
| 28 | 28 | ||
| 29 | #ifdef __powerpc64__ | 29 | #ifdef __powerpc64__ |
| 30 | 30 | ||
| 31 | /* | ||
| 32 | * Size of redzone that userspace is allowed to use below the stack | ||
| 33 | * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in | ||
| 34 | * the new ELFv2 little-endian ABI, so we allow the larger amount. | ||
| 35 | * | ||
| 36 | * For kernel code we allow a 288-byte redzone, in order to conserve | ||
| 37 | * kernel stack space; gcc currently only uses 288 bytes, and will | ||
| 38 | * hopefully allow explicit control of the redzone size in future. | ||
| 39 | */ | ||
| 40 | #define USER_REDZONE_SIZE 512 | ||
| 41 | #define KERNEL_REDZONE_SIZE 288 | ||
| 42 | |||
| 31 | #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ | 43 | #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ |
| 32 | #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ | 44 | #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ |
| 33 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) | 45 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) |
| 34 | #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ | 46 | #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ |
| 35 | STACK_FRAME_OVERHEAD + 288) | 47 | STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) |
| 36 | #define STACK_FRAME_MARKER 12 | 48 | #define STACK_FRAME_MARKER 12 |
| 37 | 49 | ||
| 38 | /* Size of dummy stack frame allocated when calling signal handler. */ | 50 | /* Size of dummy stack frame allocated when calling signal handler. */ |
| @@ -41,6 +53,8 @@ | |||
| 41 | 53 | ||
| 42 | #else /* __powerpc64__ */ | 54 | #else /* __powerpc64__ */ |
| 43 | 55 | ||
| 56 | #define USER_REDZONE_SIZE 0 | ||
| 57 | #define KERNEL_REDZONE_SIZE 0 | ||
| 44 | #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ | 58 | #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ |
| 45 | #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ | 59 | #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ |
| 46 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) | 60 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) |
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 4ee06fe15de4..d0e784e0ff48 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | #ifdef __powerpc64__ | 9 | #ifdef __powerpc64__ |
| 10 | 10 | ||
| 11 | extern char __start_interrupts[]; | ||
| 11 | extern char __end_interrupts[]; | 12 | extern char __end_interrupts[]; |
| 12 | 13 | ||
| 13 | extern char __prom_init_toc_start[]; | 14 | extern char __prom_init_toc_start[]; |
| @@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr) | |||
| 21 | return 0; | 22 | return 0; |
| 22 | } | 23 | } |
| 23 | 24 | ||
| 25 | static inline int overlaps_interrupt_vector_text(unsigned long start, | ||
| 26 | unsigned long end) | ||
| 27 | { | ||
| 28 | unsigned long real_start, real_end; | ||
| 29 | real_start = __start_interrupts - _stext; | ||
| 30 | real_end = __end_interrupts - _stext; | ||
| 31 | |||
| 32 | return start < (unsigned long)__va(real_end) && | ||
| 33 | (unsigned long)__va(real_start) < end; | ||
| 34 | } | ||
| 35 | |||
| 24 | static inline int overlaps_kernel_text(unsigned long start, unsigned long end) | 36 | static inline int overlaps_kernel_text(unsigned long start, unsigned long end) |
| 25 | { | 37 | { |
| 26 | return start < (unsigned long)__init_end && | 38 | return start < (unsigned long)__init_end && |
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 0d9cecddf8a4..c53f5f6d1761 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h | |||
| @@ -4,11 +4,11 @@ | |||
| 4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
| 5 | 5 | ||
| 6 | /* Default link addresses for the vDSOs */ | 6 | /* Default link addresses for the vDSOs */ |
| 7 | #define VDSO32_LBASE 0x100000 | 7 | #define VDSO32_LBASE 0x0 |
| 8 | #define VDSO64_LBASE 0x100000 | 8 | #define VDSO64_LBASE 0x0 |
| 9 | 9 | ||
| 10 | /* Default map addresses for 32bit vDSO */ | 10 | /* Default map addresses for 32bit vDSO */ |
| 11 | #define VDSO32_MBASE VDSO32_LBASE | 11 | #define VDSO32_MBASE 0x100000 |
| 12 | 12 | ||
| 13 | #define VDSO_VERSION_STRING LINUX_2.6.15 | 13 | #define VDSO_VERSION_STRING LINUX_2.6.15 |
| 14 | 14 | ||
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 11c1d069d920..7a13f378ca2c 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
| @@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
| 98 | size_t csize, unsigned long offset, int userbuf) | 98 | size_t csize, unsigned long offset, int userbuf) |
| 99 | { | 99 | { |
| 100 | void *vaddr; | 100 | void *vaddr; |
| 101 | phys_addr_t paddr; | ||
| 101 | 102 | ||
| 102 | if (!csize) | 103 | if (!csize) |
| 103 | return 0; | 104 | return 0; |
| 104 | 105 | ||
| 105 | csize = min_t(size_t, csize, PAGE_SIZE); | 106 | csize = min_t(size_t, csize, PAGE_SIZE); |
| 107 | paddr = pfn << PAGE_SHIFT; | ||
| 106 | 108 | ||
| 107 | if ((min_low_pfn < pfn) && (pfn < max_pfn)) { | 109 | if (memblock_is_region_memory(paddr, csize)) { |
| 108 | vaddr = __va(pfn << PAGE_SHIFT); | 110 | vaddr = __va(paddr); |
| 109 | csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); | 111 | csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); |
| 110 | } else { | 112 | } else { |
| 111 | vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); | 113 | vaddr = __ioremap(paddr, PAGE_SIZE, 0); |
| 112 | csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); | 114 | csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); |
| 113 | iounmap(vaddr); | 115 | iounmap(vaddr); |
| 114 | } | 116 | } |
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 8032b97ccdcb..ee78f6e49d64 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
| @@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops); | |||
| 191 | 191 | ||
| 192 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | 192 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) |
| 193 | 193 | ||
| 194 | int dma_set_mask(struct device *dev, u64 dma_mask) | 194 | int __dma_set_mask(struct device *dev, u64 dma_mask) |
| 195 | { | 195 | { |
| 196 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | 196 | struct dma_map_ops *dma_ops = get_dma_ops(dev); |
| 197 | 197 | ||
| 198 | if (ppc_md.dma_set_mask) | ||
| 199 | return ppc_md.dma_set_mask(dev, dma_mask); | ||
| 200 | if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) | 198 | if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) |
| 201 | return dma_ops->set_dma_mask(dev, dma_mask); | 199 | return dma_ops->set_dma_mask(dev, dma_mask); |
| 202 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | 200 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) |
| @@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask) | |||
| 204 | *dev->dma_mask = dma_mask; | 202 | *dev->dma_mask = dma_mask; |
| 205 | return 0; | 203 | return 0; |
| 206 | } | 204 | } |
| 205 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
| 206 | { | ||
| 207 | if (ppc_md.dma_set_mask) | ||
| 208 | return ppc_md.dma_set_mask(dev, dma_mask); | ||
| 209 | return __dma_set_mask(dev, dma_mask); | ||
| 210 | } | ||
| 207 | EXPORT_SYMBOL(dma_set_mask); | 211 | EXPORT_SYMBOL(dma_set_mask); |
| 208 | 212 | ||
| 209 | u64 dma_get_required_mask(struct device *dev) | 213 | u64 dma_get_required_mask(struct device *dev) |
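
Driver-facing behaviour is unchanged: dma_set_mask() still honours a ppc_md.dma_set_mask override first, while __dma_set_mask() now carries the generic per-device logic so platform hooks can fall back to it. A typical (illustrative) driver call site looks the same as before:

static int example_pci_probe(struct pci_dev *pdev)
{
	/* Ask for 64-bit DMA addressing, fall back to 32-bit, and give up
	 * if neither is supported by the platform/IOMMU configuration. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}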
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 148db72a8c43..e7b76a6bf150 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
| 29 | #include <linux/proc_fs.h> | 29 | #include <linux/proc_fs.h> |
| 30 | #include <linux/rbtree.h> | 30 | #include <linux/rbtree.h> |
| 31 | #include <linux/reboot.h> | ||
| 31 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
| 32 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
| 33 | #include <linux/export.h> | 34 | #include <linux/export.h> |
| @@ -89,7 +90,7 @@ | |||
| 89 | /* Platform dependent EEH operations */ | 90 | /* Platform dependent EEH operations */ |
| 90 | struct eeh_ops *eeh_ops = NULL; | 91 | struct eeh_ops *eeh_ops = NULL; |
| 91 | 92 | ||
| 92 | int eeh_subsystem_enabled; | 93 | bool eeh_subsystem_enabled = false; |
| 93 | EXPORT_SYMBOL(eeh_subsystem_enabled); | 94 | EXPORT_SYMBOL(eeh_subsystem_enabled); |
| 94 | 95 | ||
| 95 | /* | 96 | /* |
| @@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
| 364 | 365 | ||
| 365 | eeh_stats.total_mmio_ffs++; | 366 | eeh_stats.total_mmio_ffs++; |
| 366 | 367 | ||
| 367 | if (!eeh_subsystem_enabled) | 368 | if (!eeh_enabled()) |
| 368 | return 0; | 369 | return 0; |
| 369 | 370 | ||
| 370 | if (!edev) { | 371 | if (!edev) { |
| @@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name) | |||
| 747 | return -EEXIST; | 748 | return -EEXIST; |
| 748 | } | 749 | } |
| 749 | 750 | ||
| 751 | static int eeh_reboot_notifier(struct notifier_block *nb, | ||
| 752 | unsigned long action, void *unused) | ||
| 753 | { | ||
| 754 | eeh_set_enable(false); | ||
| 755 | return NOTIFY_DONE; | ||
| 756 | } | ||
| 757 | |||
| 758 | static struct notifier_block eeh_reboot_nb = { | ||
| 759 | .notifier_call = eeh_reboot_notifier, | ||
| 760 | }; | ||
| 761 | |||
| 750 | /** | 762 | /** |
| 751 | * eeh_init - EEH initialization | 763 | * eeh_init - EEH initialization |
| 752 | * | 764 | * |
| @@ -778,6 +790,14 @@ int eeh_init(void) | |||
| 778 | if (machine_is(powernv) && cnt++ <= 0) | 790 | if (machine_is(powernv) && cnt++ <= 0) |
| 779 | return ret; | 791 | return ret; |
| 780 | 792 | ||
| 793 | /* Register reboot notifier */ | ||
| 794 | ret = register_reboot_notifier(&eeh_reboot_nb); | ||
| 795 | if (ret) { | ||
| 796 | pr_warn("%s: Failed to register notifier (%d)\n", | ||
| 797 | __func__, ret); | ||
| 798 | return ret; | ||
| 799 | } | ||
| 800 | |||
| 781 | /* call platform initialization function */ | 801 | /* call platform initialization function */ |
| 782 | if (!eeh_ops) { | 802 | if (!eeh_ops) { |
| 783 | pr_warning("%s: Platform EEH operation not found\n", | 803 | pr_warning("%s: Platform EEH operation not found\n", |
| @@ -822,7 +842,7 @@ int eeh_init(void) | |||
| 822 | return ret; | 842 | return ret; |
| 823 | } | 843 | } |
| 824 | 844 | ||
| 825 | if (eeh_subsystem_enabled) | 845 | if (eeh_enabled()) |
| 826 | pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); | 846 | pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); |
| 827 | else | 847 | else |
| 828 | pr_warning("EEH: No capable adapters found\n"); | 848 | pr_warning("EEH: No capable adapters found\n"); |
| @@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev) | |||
| 897 | struct device_node *dn; | 917 | struct device_node *dn; |
| 898 | struct eeh_dev *edev; | 918 | struct eeh_dev *edev; |
| 899 | 919 | ||
| 900 | if (!dev || !eeh_subsystem_enabled) | 920 | if (!dev || !eeh_enabled()) |
| 901 | return; | 921 | return; |
| 902 | 922 | ||
| 903 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); | 923 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); |
| @@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev) | |||
| 1005 | { | 1025 | { |
| 1006 | struct eeh_dev *edev; | 1026 | struct eeh_dev *edev; |
| 1007 | 1027 | ||
| 1008 | if (!dev || !eeh_subsystem_enabled) | 1028 | if (!dev || !eeh_enabled()) |
| 1009 | return; | 1029 | return; |
| 1010 | edev = pci_dev_to_eeh_dev(dev); | 1030 | edev = pci_dev_to_eeh_dev(dev); |
| 1011 | 1031 | ||
| @@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev) | |||
| 1045 | 1065 | ||
| 1046 | static int proc_eeh_show(struct seq_file *m, void *v) | 1066 | static int proc_eeh_show(struct seq_file *m, void *v) |
| 1047 | { | 1067 | { |
| 1048 | if (0 == eeh_subsystem_enabled) { | 1068 | if (!eeh_enabled()) { |
| 1049 | seq_printf(m, "EEH Subsystem is globally disabled\n"); | 1069 | seq_printf(m, "EEH Subsystem is globally disabled\n"); |
| 1050 | seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); | 1070 | seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); |
| 1051 | } else { | 1071 | } else { |
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 7bb30dca4e19..fdc679d309ec 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
| @@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata) | |||
| 362 | */ | 362 | */ |
| 363 | if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) | 363 | if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) |
| 364 | return NULL; | 364 | return NULL; |
| 365 | |||
| 365 | driver = eeh_pcid_get(dev); | 366 | driver = eeh_pcid_get(dev); |
| 366 | if (driver && driver->err_handler) | 367 | if (driver) { |
| 367 | return NULL; | 368 | eeh_pcid_put(dev); |
| 369 | if (driver->err_handler) | ||
| 370 | return NULL; | ||
| 371 | } | ||
| 368 | 372 | ||
| 369 | /* Remove it from PCI subsystem */ | 373 | /* Remove it from PCI subsystem */ |
| 370 | pr_debug("EEH: Removing %s without EEH sensitive driver\n", | 374 | pr_debug("EEH: Removing %s without EEH sensitive driver\n", |
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 9b27b293a922..b0ded97ee4e1 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
| @@ -74,6 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) | |||
| 74 | */ | 74 | */ |
| 75 | static int test_24bit_addr(unsigned long ip, unsigned long addr) | 75 | static int test_24bit_addr(unsigned long ip, unsigned long addr) |
| 76 | { | 76 | { |
| 77 | addr = ppc_function_entry((void *)addr); | ||
| 77 | 78 | ||
| 78 | /* use the create_branch to verify that this offset can be branched */ | 79 | /* use the create_branch to verify that this offset can be branched */ |
| 79 | return create_branch((unsigned int *)ip, addr, 0); | 80 | return create_branch((unsigned int *)ip, addr, 0); |
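
test_24bit_addr() previously compared the branch target against the raw symbol address; running it through ppc_function_entry() first matters on ABIs where the symbol is a function descriptor (or has a separate global entry point) rather than the code itself. A hedged sketch of the ABIv1-style descriptor involved; field and type names are illustrative, not the kernel's own definitions:

/* On ppc64 ABIv1 a function "symbol" points at a descriptor like this,
 * not at the function's text, so the real entry address has to be read
 * from its first field before reachability can be tested. */
struct example_func_desc {
	unsigned long entry;	/* address of the function's text */
	unsigned long toc;	/* TOC pointer for the callee      */
	unsigned long env;	/* environment pointer (unused)    */
};

static unsigned long example_function_entry(void *func)
{
	return ((struct example_func_desc *)func)->entry;
}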
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index d773dd440a45..88e3ec6e1d96 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
| @@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl) | |||
| 1088 | memset(tbl->it_map, 0xff, sz); | 1088 | memset(tbl->it_map, 0xff, sz); |
| 1089 | iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); | 1089 | iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); |
| 1090 | 1090 | ||
| 1091 | /* | ||
| 1092 | * Disable iommu bypass, otherwise the user can DMA to all of | ||
| 1093 | * our physical memory via the bypass window instead of just | ||
| 1094 | * the pages that have been explicitly mapped into the iommu | ||
| 1095 | */ | ||
| 1096 | if (tbl->set_bypass) | ||
| 1097 | tbl->set_bypass(tbl, false); | ||
| 1098 | |||
| 1091 | return 0; | 1099 | return 0; |
| 1092 | } | 1100 | } |
| 1093 | EXPORT_SYMBOL_GPL(iommu_take_ownership); | 1101 | EXPORT_SYMBOL_GPL(iommu_take_ownership); |
| @@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl) | |||
| 1102 | /* Restore bit#0 set by iommu_init_table() */ | 1110 | /* Restore bit#0 set by iommu_init_table() */ |
| 1103 | if (tbl->it_offset == 0) | 1111 | if (tbl->it_offset == 0) |
| 1104 | set_bit(0, tbl->it_map); | 1112 | set_bit(0, tbl->it_map); |
| 1113 | |||
| 1114 | /* The kernel owns the device now, we can restore the iommu bypass */ | ||
| 1115 | if (tbl->set_bypass) | ||
| 1116 | tbl->set_bypass(tbl, true); | ||
| 1105 | } | 1117 | } |
| 1106 | EXPORT_SYMBOL_GPL(iommu_release_ownership); | 1118 | EXPORT_SYMBOL_GPL(iommu_release_ownership); |
| 1107 | 1119 | ||
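
The optional set_bypass() hook added to struct iommu_table (see the iommu.h hunk earlier) is what take/release ownership toggles here. A rough sketch of the kind of callback a platform could supply; the body is illustrative, real code would reprogram the PHB's bypass window:

static void example_set_bypass(struct iommu_table *tbl, bool enable)
{
	if (enable) {
		/* Kernel owns the device again: restore the 1:1 bypass
		 * window so direct DMA mappings work. */
		pr_debug("iommu: enabling bypass for table %p\n", tbl);
	} else {
		/* Userspace (VFIO) owns the table: tear the window down so
		 * DMA is limited to explicitly mapped TCE entries. */
		pr_debug("iommu: disabling bypass for table %p\n", tbl);
	}
	/* ... hardware-specific window programming goes here ... */
}

/* Installed once when the table is set up, e.g.:
 *	tbl->set_bypass = example_set_bypass;
 */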
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 9729b23bfb0a..1d0848bba049 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
| @@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void) | |||
| 559 | #ifdef CONFIG_PPC64 | 559 | #ifdef CONFIG_PPC64 |
| 560 | cpu_nr = i; | 560 | cpu_nr = i; |
| 561 | #else | 561 | #else |
| 562 | #ifdef CONFIG_SMP | ||
| 562 | cpu_nr = get_hard_smp_processor_id(i); | 563 | cpu_nr = get_hard_smp_processor_id(i); |
| 564 | #else | ||
| 565 | cpu_nr = 0; | ||
| 563 | #endif | 566 | #endif |
| 567 | #endif | ||
| 568 | |||
| 564 | memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); | 569 | memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); |
| 565 | tp = critirq_ctx[cpu_nr]; | 570 | tp = critirq_ctx[cpu_nr]; |
| 566 | tp->cpu = cpu_nr; | 571 | tp->cpu = cpu_nr; |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 75d4f7340da8..015ae55c1868 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
| @@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size) | |||
| 196 | 196 | ||
| 197 | /* Values we need to export to the second kernel via the device tree. */ | 197 | /* Values we need to export to the second kernel via the device tree. */ |
| 198 | static phys_addr_t kernel_end; | 198 | static phys_addr_t kernel_end; |
| 199 | static phys_addr_t crashk_base; | ||
| 199 | static phys_addr_t crashk_size; | 200 | static phys_addr_t crashk_size; |
| 201 | static unsigned long long mem_limit; | ||
| 200 | 202 | ||
| 201 | static struct property kernel_end_prop = { | 203 | static struct property kernel_end_prop = { |
| 202 | .name = "linux,kernel-end", | 204 | .name = "linux,kernel-end", |
| @@ -207,7 +209,7 @@ static struct property kernel_end_prop = { | |||
| 207 | static struct property crashk_base_prop = { | 209 | static struct property crashk_base_prop = { |
| 208 | .name = "linux,crashkernel-base", | 210 | .name = "linux,crashkernel-base", |
| 209 | .length = sizeof(phys_addr_t), | 211 | .length = sizeof(phys_addr_t), |
| 210 | .value = &crashk_res.start, | 212 | .value = &crashk_base |
| 211 | }; | 213 | }; |
| 212 | 214 | ||
| 213 | static struct property crashk_size_prop = { | 215 | static struct property crashk_size_prop = { |
| @@ -219,9 +221,11 @@ static struct property crashk_size_prop = { | |||
| 219 | static struct property memory_limit_prop = { | 221 | static struct property memory_limit_prop = { |
| 220 | .name = "linux,memory-limit", | 222 | .name = "linux,memory-limit", |
| 221 | .length = sizeof(unsigned long long), | 223 | .length = sizeof(unsigned long long), |
| 222 | .value = &memory_limit, | 224 | .value = &mem_limit, |
| 223 | }; | 225 | }; |
| 224 | 226 | ||
| 227 | #define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG) | ||
| 228 | |||
| 225 | static void __init export_crashk_values(struct device_node *node) | 229 | static void __init export_crashk_values(struct device_node *node) |
| 226 | { | 230 | { |
| 227 | struct property *prop; | 231 | struct property *prop; |
| @@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node) | |||
| 237 | of_remove_property(node, prop); | 241 | of_remove_property(node, prop); |
| 238 | 242 | ||
| 239 | if (crashk_res.start != 0) { | 243 | if (crashk_res.start != 0) { |
| 244 | crashk_base = cpu_to_be_ulong(crashk_res.start), | ||
| 240 | of_add_property(node, &crashk_base_prop); | 245 | of_add_property(node, &crashk_base_prop); |
| 241 | crashk_size = resource_size(&crashk_res); | 246 | crashk_size = cpu_to_be_ulong(resource_size(&crashk_res)); |
| 242 | of_add_property(node, &crashk_size_prop); | 247 | of_add_property(node, &crashk_size_prop); |
| 243 | } | 248 | } |
| 244 | 249 | ||
| @@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node) | |||
| 246 | * memory_limit is required by the kexec-tools to limit the | 251 | * memory_limit is required by the kexec-tools to limit the |
| 247 | * crash regions to the actual memory used. | 252 | * crash regions to the actual memory used. |
| 248 | */ | 253 | */ |
| 254 | mem_limit = cpu_to_be_ulong(memory_limit); | ||
| 249 | of_update_property(node, &memory_limit_prop); | 255 | of_update_property(node, &memory_limit_prop); |
| 250 | } | 256 | } |
| 251 | 257 | ||
| @@ -264,7 +270,7 @@ static int __init kexec_setup(void) | |||
| 264 | of_remove_property(node, prop); | 270 | of_remove_property(node, prop); |
| 265 | 271 | ||
| 266 | /* information needed by userspace when using default_machine_kexec */ | 272 | /* information needed by userspace when using default_machine_kexec */ |
| 267 | kernel_end = __pa(_end); | 273 | kernel_end = cpu_to_be_ulong(__pa(_end)); |
| 268 | of_add_property(node, &kernel_end_prop); | 274 | of_add_property(node, &kernel_end_prop); |
| 269 | 275 | ||
| 270 | export_crashk_values(node); | 276 | export_crashk_values(node); |
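
The cpu_to_be_ulong macro relies on BITS_PER_LONG expanding to a bare 32 or 64 token, so the paste produces cpu_to_be32 or cpu_to_be64 and the exported device-tree properties are always big-endian regardless of the running kernel's endianness. A standalone userspace-style illustration of the token pasting; the htobe64 mapping is a stand-in, not kernel code:

#define _DEFAULT_SOURCE			/* for htobe64() in <endian.h> */
#include <stdio.h>
#include <endian.h>

#define ___PASTE(a, b)	a##b
#define __PASTE(a, b)	___PASTE(a, b)	/* expand arguments, then paste */

#define BITS_PER_LONG	64		/* a bare token, as the kernel defines it per-arch */
#define cpu_to_be64(x)	htobe64(x)	/* stand-in for the kernel's helper */

#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)	/* -> cpu_to_be64 */

int main(void)
{
	unsigned long limit = 0x80000000UL;

	printf("%#lx -> %#lx\n", limit, (unsigned long)cpu_to_be_ulong(limit));
	return 0;
}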
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index be4e6d648f60..59d229a2a3e0 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c | |||
| @@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image) | |||
| 369 | 369 | ||
| 370 | /* Values we need to export to the second kernel via the device tree. */ | 370 | /* Values we need to export to the second kernel via the device tree. */ |
| 371 | static unsigned long htab_base; | 371 | static unsigned long htab_base; |
| 372 | static unsigned long htab_size; | ||
| 372 | 373 | ||
| 373 | static struct property htab_base_prop = { | 374 | static struct property htab_base_prop = { |
| 374 | .name = "linux,htab-base", | 375 | .name = "linux,htab-base", |
| @@ -379,7 +380,7 @@ static struct property htab_base_prop = { | |||
| 379 | static struct property htab_size_prop = { | 380 | static struct property htab_size_prop = { |
| 380 | .name = "linux,htab-size", | 381 | .name = "linux,htab-size", |
| 381 | .length = sizeof(unsigned long), | 382 | .length = sizeof(unsigned long), |
| 382 | .value = &htab_size_bytes, | 383 | .value = &htab_size, |
| 383 | }; | 384 | }; |
| 384 | 385 | ||
| 385 | static int __init export_htab_values(void) | 386 | static int __init export_htab_values(void) |
| @@ -403,8 +404,9 @@ static int __init export_htab_values(void) | |||
| 403 | if (prop) | 404 | if (prop) |
| 404 | of_remove_property(node, prop); | 405 | of_remove_property(node, prop); |
| 405 | 406 | ||
| 406 | htab_base = __pa(htab_address); | 407 | htab_base = cpu_to_be64(__pa(htab_address)); |
| 407 | of_add_property(node, &htab_base_prop); | 408 | of_add_property(node, &htab_base_prop); |
| 409 | htab_size = cpu_to_be64(htab_size_bytes); | ||
| 408 | of_add_property(node, &htab_size_prop); | 410 | of_add_property(node, &htab_size_prop); |
| 409 | 411 | ||
| 410 | of_node_put(node); | 412 | of_node_put(node); |
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 879f09620f83..7c6bb4b17b49 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
| @@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq) | |||
| 57 | mtlr r0 | 57 | mtlr r0 |
| 58 | blr | 58 | blr |
| 59 | 59 | ||
| 60 | /* | ||
| 61 | * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); | ||
| 62 | */ | ||
| 60 | _GLOBAL(call_do_irq) | 63 | _GLOBAL(call_do_irq) |
| 61 | mflr r0 | 64 | mflr r0 |
| 62 | stw r0,4(r1) | 65 | stw r0,4(r1) |
| 63 | lwz r10,THREAD+KSP_LIMIT(r2) | 66 | lwz r10,THREAD+KSP_LIMIT(r2) |
| 64 | addi r11,r3,THREAD_INFO_GAP | 67 | addi r11,r4,THREAD_INFO_GAP |
| 65 | stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) | 68 | stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) |
| 66 | mr r1,r4 | 69 | mr r1,r4 |
| 67 | stw r10,8(r1) | 70 | stw r10,8(r1) |
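
The bug being fixed: the new IRQ stack is the second C argument (r4), but the old code computed the thread_info limit from r3, the pt_regs pointer. The C-side view of the interface, matching the prototype comment added above (the wrapper function is illustrative):

/* call_do_irq(regs, irqtp): switch to the hard-IRQ stack "irqtp" and run
 * __do_irq(regs) there; regs arrives in r3, irqtp in r4. */
extern void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);

static void example_handle_irq(struct pt_regs *regs,
			       struct thread_info *irqtp)
{
	call_do_irq(regs, irqtp);
}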
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8d4c247f1738..af064d28b365 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | |||
| 1048 | flush_altivec_to_thread(src); | 1048 | flush_altivec_to_thread(src); |
| 1049 | flush_vsx_to_thread(src); | 1049 | flush_vsx_to_thread(src); |
| 1050 | flush_spe_to_thread(src); | 1050 | flush_spe_to_thread(src); |
| 1051 | /* | ||
| 1052 | * Flush TM state out so we can copy it. __switch_to_tm() does this | ||
| 1053 | * flush but it removes the checkpointed state from the current CPU and | ||
| 1054 | * transitions the CPU out of TM mode. Hence we need to call | ||
| 1055 | * tm_recheckpoint_new_task() (on the same task) to restore the | ||
| 1056 | * checkpointed state back and the TM mode. | ||
| 1057 | */ | ||
| 1058 | __switch_to_tm(src); | ||
| 1059 | tm_recheckpoint_new_task(src); | ||
| 1051 | 1060 | ||
| 1052 | *dst = *src; | 1061 | *dst = *src; |
| 1053 | 1062 | ||
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index b47a0e1ab001..d88736fbece6 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S | |||
| @@ -69,8 +69,8 @@ _GLOBAL(relocate) | |||
| 69 | * R_PPC64_RELATIVE ones. | 69 | * R_PPC64_RELATIVE ones. |
| 70 | */ | 70 | */ |
| 71 | mtctr r8 | 71 | mtctr r8 |
| 72 | 5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */ | 72 | 5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */ |
| 73 | cmpwi r0,R_PPC64_RELATIVE | 73 | cmpdi r0,R_PPC64_RELATIVE |
| 74 | bne 6f | 74 | bne 6f |
| 75 | ld r6,0(r9) /* reloc->r_offset */ | 75 | ld r6,0(r9) /* reloc->r_offset */ |
| 76 | ld r0,16(r9) /* reloc->r_addend */ | 76 | ld r0,16(r9) /* reloc->r_addend */ |
| @@ -81,6 +81,7 @@ _GLOBAL(relocate) | |||
| 81 | 81 | ||
| 82 | 6: blr | 82 | 6: blr |
| 83 | 83 | ||
| 84 | .balign 8 | ||
| 84 | p_dyn: .llong __dynamic_start - 0b | 85 | p_dyn: .llong __dynamic_start - 0b |
| 85 | p_rela: .llong __rela_dyn_start - 0b | 86 | p_rela: .llong __rela_dyn_start - 0b |
| 86 | p_st: .llong _stext - 0b | 87 | p_st: .llong _stext - 0b |
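
Why the old lwz at offset 12 only ever worked big-endian: r_info is one 64-bit field at offset 8 of Elf64_Rela, and its low half (the relocation type) sits at byte offset 12 only on BE. The layout, shown here as a hedged reference using the standard ELF definitions, not code from this patch:

#include <stdint.h>

typedef struct {
	uint64_t r_offset;	/* byte 0:  where to apply the relocation    */
	uint64_t r_info;	/* byte 8:  symbol index (high) + type (low) */
	int64_t  r_addend;	/* byte 16: constant addend, the "ld r0,16(r9)" above */
} Elf64_Rela;

#define ELF64_R_TYPE(info)	((info) & 0xffffffff)
#define R_PPC64_RELATIVE	22	/* the only type this relocator handles */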
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 2b0da27eaee4..04cc4fcca78b 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
| @@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void) | |||
| 247 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 | 247 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 |
| 248 | * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ | 248 | * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ |
| 249 | for_each_possible_cpu(i) { | 249 | for_each_possible_cpu(i) { |
| 250 | #ifdef CONFIG_SMP | ||
| 250 | hw_cpu = get_hard_smp_processor_id(i); | 251 | hw_cpu = get_hard_smp_processor_id(i); |
| 252 | #else | ||
| 253 | hw_cpu = 0; | ||
| 254 | #endif | ||
| 255 | |||
| 251 | critirq_ctx[hw_cpu] = (struct thread_info *) | 256 | critirq_ctx[hw_cpu] = (struct thread_info *) |
| 252 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 257 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
| 253 | #ifdef CONFIG_BOOKE | 258 | #ifdef CONFIG_BOOKE |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index e35bf773df7a..8d253c29649b 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
| @@ -65,8 +65,8 @@ struct rt_sigframe { | |||
| 65 | struct siginfo __user *pinfo; | 65 | struct siginfo __user *pinfo; |
| 66 | void __user *puc; | 66 | void __user *puc; |
| 67 | struct siginfo info; | 67 | struct siginfo info; |
| 68 | /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ | 68 | /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */ |
| 69 | char abigap[288]; | 69 | char abigap[USER_REDZONE_SIZE]; |
| 70 | } __attribute__ ((aligned (16))); | 70 | } __attribute__ ((aligned (16))); |
| 71 | 71 | ||
| 72 | static const char fmt32[] = KERN_INFO \ | 72 | static const char fmt32[] = KERN_INFO \ |
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S index 79683d0393f5..6ac107ac402a 100644 --- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S +++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | .globl vdso32_start, vdso32_end | 6 | .globl vdso32_start, vdso32_end |
| 7 | .balign PAGE_SIZE | 7 | .balign PAGE_SIZE |
| 8 | vdso32_start: | 8 | vdso32_start: |
| 9 | .incbin "arch/powerpc/kernel/vdso32/vdso32.so" | 9 | .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg" |
| 10 | .balign PAGE_SIZE | 10 | .balign PAGE_SIZE |
| 11 | vdso32_end: | 11 | vdso32_end: |
| 12 | 12 | ||
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S index 8df9e2463007..df60fca6a13d 100644 --- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S +++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | .globl vdso64_start, vdso64_end | 6 | .globl vdso64_start, vdso64_end |
| 7 | .balign PAGE_SIZE | 7 | .balign PAGE_SIZE |
| 8 | vdso64_start: | 8 | vdso64_start: |
| 9 | .incbin "arch/powerpc/kernel/vdso64/vdso64.so" | 9 | .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg" |
| 10 | .balign PAGE_SIZE | 10 | .balign PAGE_SIZE |
| 11 | vdso64_end: | 11 | vdso64_end: |
| 12 | 12 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index e66d4ec04d95..818dce344e82 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
| @@ -1504,73 +1504,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
| 1504 | 1: addi r8,r8,16 | 1504 | 1: addi r8,r8,16 |
| 1505 | .endr | 1505 | .endr |
| 1506 | 1506 | ||
| 1507 | /* Save DEC */ | ||
| 1508 | mfspr r5,SPRN_DEC | ||
| 1509 | mftb r6 | ||
| 1510 | extsw r5,r5 | ||
| 1511 | add r5,r5,r6 | ||
| 1512 | std r5,VCPU_DEC_EXPIRES(r9) | ||
| 1513 | |||
| 1514 | BEGIN_FTR_SECTION | ||
| 1515 | b 8f | ||
| 1516 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) | ||
| 1517 | /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ | ||
| 1518 | mfmsr r8 | ||
| 1519 | li r0, 1 | ||
| 1520 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | ||
| 1521 | mtmsrd r8 | ||
| 1522 | |||
| 1523 | /* Save POWER8-specific registers */ | ||
| 1524 | mfspr r5, SPRN_IAMR | ||
| 1525 | mfspr r6, SPRN_PSPB | ||
| 1526 | mfspr r7, SPRN_FSCR | ||
| 1527 | std r5, VCPU_IAMR(r9) | ||
| 1528 | stw r6, VCPU_PSPB(r9) | ||
| 1529 | std r7, VCPU_FSCR(r9) | ||
| 1530 | mfspr r5, SPRN_IC | ||
| 1531 | mfspr r6, SPRN_VTB | ||
| 1532 | mfspr r7, SPRN_TAR | ||
| 1533 | std r5, VCPU_IC(r9) | ||
| 1534 | std r6, VCPU_VTB(r9) | ||
| 1535 | std r7, VCPU_TAR(r9) | ||
| 1536 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
| 1537 | mfspr r5, SPRN_TFHAR | ||
| 1538 | mfspr r6, SPRN_TFIAR | ||
| 1539 | mfspr r7, SPRN_TEXASR | ||
| 1540 | std r5, VCPU_TFHAR(r9) | ||
| 1541 | std r6, VCPU_TFIAR(r9) | ||
| 1542 | std r7, VCPU_TEXASR(r9) | ||
| 1543 | #endif | ||
| 1544 | mfspr r8, SPRN_EBBHR | ||
| 1545 | std r8, VCPU_EBBHR(r9) | ||
| 1546 | mfspr r5, SPRN_EBBRR | ||
| 1547 | mfspr r6, SPRN_BESCR | ||
| 1548 | mfspr r7, SPRN_CSIGR | ||
| 1549 | mfspr r8, SPRN_TACR | ||
| 1550 | std r5, VCPU_EBBRR(r9) | ||
| 1551 | std r6, VCPU_BESCR(r9) | ||
| 1552 | std r7, VCPU_CSIGR(r9) | ||
| 1553 | std r8, VCPU_TACR(r9) | ||
| 1554 | mfspr r5, SPRN_TCSCR | ||
| 1555 | mfspr r6, SPRN_ACOP | ||
| 1556 | mfspr r7, SPRN_PID | ||
| 1557 | mfspr r8, SPRN_WORT | ||
| 1558 | std r5, VCPU_TCSCR(r9) | ||
| 1559 | std r6, VCPU_ACOP(r9) | ||
| 1560 | stw r7, VCPU_GUEST_PID(r9) | ||
| 1561 | std r8, VCPU_WORT(r9) | ||
| 1562 | 8: | ||
| 1563 | |||
| 1564 | /* Save and reset AMR and UAMOR before turning on the MMU */ | ||
| 1565 | BEGIN_FTR_SECTION | ||
| 1566 | mfspr r5,SPRN_AMR | ||
| 1567 | mfspr r6,SPRN_UAMOR | ||
| 1568 | std r5,VCPU_AMR(r9) | ||
| 1569 | std r6,VCPU_UAMOR(r9) | ||
| 1570 | li r6,0 | ||
| 1571 | mtspr SPRN_AMR,r6 | ||
| 1572 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||
| 1573 | |||
| 1574 | /* Unset guest mode */ | 1507 | /* Unset guest mode */ |
| 1575 | li r0, KVM_GUEST_MODE_NONE | 1508 | li r0, KVM_GUEST_MODE_NONE |
| 1576 | stb r0, HSTATE_IN_GUEST(r13) | 1509 | stb r0, HSTATE_IN_GUEST(r13) |
| @@ -2203,7 +2136,7 @@ BEGIN_FTR_SECTION | |||
| 2203 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 2136 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
| 2204 | #endif | 2137 | #endif |
| 2205 | mfspr r6,SPRN_VRSAVE | 2138 | mfspr r6,SPRN_VRSAVE |
| 2206 | stw r6,VCPU_VRSAVE(r3) | 2139 | stw r6,VCPU_VRSAVE(r31) |
| 2207 | mtlr r30 | 2140 | mtlr r30 |
| 2208 | mtmsrd r5 | 2141 | mtmsrd r5 |
| 2209 | isync | 2142 | isync |
| @@ -2240,7 +2173,7 @@ BEGIN_FTR_SECTION | |||
| 2240 | bl .load_vr_state | 2173 | bl .load_vr_state |
| 2241 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 2174 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
| 2242 | #endif | 2175 | #endif |
| 2243 | lwz r7,VCPU_VRSAVE(r4) | 2176 | lwz r7,VCPU_VRSAVE(r31) |
| 2244 | mtspr SPRN_VRSAVE,r7 | 2177 | mtspr SPRN_VRSAVE,r7 |
| 2245 | mtlr r30 | 2178 | mtlr r30 |
| 2246 | mr r4,r31 | 2179 | mr r4,r31 |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index de6881259aef..d766d6ee33fe 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
| @@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | |||
| 207 | if (overlaps_kernel_text(vaddr, vaddr + step)) | 207 | if (overlaps_kernel_text(vaddr, vaddr + step)) |
| 208 | tprot &= ~HPTE_R_N; | 208 | tprot &= ~HPTE_R_N; |
| 209 | 209 | ||
| 210 | /* | ||
| 211 | * If relocatable, check if it overlaps interrupt vectors that | ||
| 212 | * are copied down to real 0. For relocatable kernel | ||
| 213 | * (e.g. kdump case) we copy interrupt vectors down to real | ||
| 214 | * address 0. Mark that region as executable. This is | ||
| 215 | * because on p8 system with relocation on exception feature | ||
| 216 | * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence | ||
| 217 | * in order to execute the interrupt handlers in virtual | ||
| 218 | * mode the vector region needs to be marked as executable. | ||
| 219 | */ | ||
| 220 | if ((PHYSICAL_START > MEMORY_START) && | ||
| 221 | overlaps_interrupt_vector_text(vaddr, vaddr + step)) | ||
| 222 | tprot &= ~HPTE_R_N; | ||
| 223 | |||
| 210 | hash = hpt_hash(vpn, shift, ssize); | 224 | hash = hpt_hash(vpn, shift, ssize); |
| 211 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); | 225 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); |
| 212 | 226 | ||
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 65b7b65e8708..62bf5e8e78da 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
| @@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, | |||
| 510 | } | 510 | } |
| 511 | 511 | ||
| 512 | unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, | 512 | unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, |
| 513 | pmd_t *pmdp, unsigned long clr) | 513 | pmd_t *pmdp, unsigned long clr, |
| 514 | unsigned long set) | ||
| 514 | { | 515 | { |
| 515 | 516 | ||
| 516 | unsigned long old, tmp; | 517 | unsigned long old, tmp; |
| @@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, | |||
| 526 | andi. %1,%0,%6\n\ | 527 | andi. %1,%0,%6\n\ |
| 527 | bne- 1b \n\ | 528 | bne- 1b \n\ |
| 528 | andc %1,%0,%4 \n\ | 529 | andc %1,%0,%4 \n\ |
| 530 | or %1,%1,%7\n\ | ||
| 529 | stdcx. %1,0,%3 \n\ | 531 | stdcx. %1,0,%3 \n\ |
| 530 | bne- 1b" | 532 | bne- 1b" |
| 531 | : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) | 533 | : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) |
| 532 | : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) | 534 | : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set) |
| 533 | : "cc" ); | 535 | : "cc" ); |
| 534 | #else | 536 | #else |
| 535 | old = pmd_val(*pmdp); | 537 | old = pmd_val(*pmdp); |
| 536 | *pmdp = __pmd(old & ~clr); | 538 | *pmdp = __pmd((old & ~clr) | set); |
| 537 | #endif | 539 | #endif |
| 538 | if (old & _PAGE_HASHPTE) | 540 | if (old & _PAGE_HASHPTE) |
| 539 | hpte_do_hugepage_flush(mm, addr, pmdp); | 541 | hpte_do_hugepage_flush(mm, addr, pmdp); |
| @@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, | |||
| 708 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | 710 | void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, |
| 709 | pmd_t *pmdp) | 711 | pmd_t *pmdp) |
| 710 | { | 712 | { |
| 711 | pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); | 713 | pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); |
| 712 | } | 714 | } |
| 713 | 715 | ||
| 714 | /* | 716 | /* |
| @@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm, | |||
| 835 | unsigned long old; | 837 | unsigned long old; |
| 836 | pgtable_t *pgtable_slot; | 838 | pgtable_t *pgtable_slot; |
| 837 | 839 | ||
| 838 | old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); | 840 | old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); |
| 839 | old_pmd = __pmd(old); | 841 | old_pmd = __pmd(old); |
| 840 | /* | 842 | /* |
| 841 | * We have pmd == none and we are holding page_table_lock. | 843 | * We have pmd == none and we are holding page_table_lock. |
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c index a770df2dae70..6c0b1f5f8d2c 100644 --- a/arch/powerpc/mm/subpage-prot.c +++ b/arch/powerpc/mm/subpage-prot.c | |||
| @@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, | |||
| 78 | pte = pte_offset_map_lock(mm, pmd, addr, &ptl); | 78 | pte = pte_offset_map_lock(mm, pmd, addr, &ptl); |
| 79 | arch_enter_lazy_mmu_mode(); | 79 | arch_enter_lazy_mmu_mode(); |
| 80 | for (; npages > 0; --npages) { | 80 | for (; npages > 0; --npages) { |
| 81 | pte_update(mm, addr, pte, 0, 0); | 81 | pte_update(mm, addr, pte, 0, 0, 0); |
| 82 | addr += PAGE_SIZE; | 82 | addr += PAGE_SIZE; |
| 83 | ++pte; | 83 | ++pte; |
| 84 | } | 84 | } |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 29b89e863d7c..67cf22083f4c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
| @@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu) | |||
| 1147 | mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); | 1147 | mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); |
| 1148 | 1148 | ||
| 1149 | mb(); | 1149 | mb(); |
| 1150 | if (cpuhw->bhrb_users) | ||
| 1151 | ppmu->config_bhrb(cpuhw->bhrb_filter); | ||
| 1152 | |||
| 1150 | write_mmcr0(cpuhw, mmcr0); | 1153 | write_mmcr0(cpuhw, mmcr0); |
| 1151 | 1154 | ||
| 1152 | /* | 1155 | /* |
| @@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu) | |||
| 1158 | } | 1161 | } |
| 1159 | 1162 | ||
| 1160 | out: | 1163 | out: |
| 1161 | if (cpuhw->bhrb_users) | ||
| 1162 | ppmu->config_bhrb(cpuhw->bhrb_filter); | ||
| 1163 | 1164 | ||
| 1164 | local_irq_restore(flags); | 1165 | local_irq_restore(flags); |
| 1165 | } | 1166 | } |
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index a3f7abd2f13f..96cee20dcd34 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c | |||
| @@ -25,6 +25,37 @@ | |||
| 25 | #define PM_BRU_FIN 0x10068 | 25 | #define PM_BRU_FIN 0x10068 |
| 26 | #define PM_BR_MPRED_CMPL 0x400f6 | 26 | #define PM_BR_MPRED_CMPL 0x400f6 |
| 27 | 27 | ||
| 28 | /* All L1 D cache load references counted at finish, gated by reject */ | ||
| 29 | #define PM_LD_REF_L1 0x100ee | ||
| 30 | /* Load Missed L1 */ | ||
| 31 | #define PM_LD_MISS_L1 0x3e054 | ||
| 32 | /* Store Missed L1 */ | ||
| 33 | #define PM_ST_MISS_L1 0x300f0 | ||
| 34 | /* L1 cache data prefetches */ | ||
| 35 | #define PM_L1_PREF 0x0d8b8 | ||
| 36 | /* Instruction fetches from L1 */ | ||
| 37 | #define PM_INST_FROM_L1 0x04080 | ||
| 38 | /* Demand iCache Miss */ | ||
| 39 | #define PM_L1_ICACHE_MISS 0x200fd | ||
| 41 | /* Instruction Demand sectors written into IL1 */ | ||
| 41 | #define PM_L1_DEMAND_WRITE 0x0408c | ||
| 42 | /* Instruction prefetch written into IL1 */ | ||
| 43 | #define PM_IC_PREF_WRITE 0x0408e | ||
| 44 | /* The data cache was reloaded from local core's L3 due to a demand load */ | ||
| 45 | #define PM_DATA_FROM_L3 0x4c042 | ||
| 46 | /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */ | ||
| 47 | #define PM_DATA_FROM_L3MISS 0x300fe | ||
| 48 | /* All successful D-side store dispatches for this thread */ | ||
| 49 | #define PM_L2_ST 0x17080 | ||
| 50 | /* All successful D-side store dispatches for this thread that were L2 Miss */ | ||
| 51 | #define PM_L2_ST_MISS 0x17082 | ||
| 52 | /* Total HW L3 prefetches(Load+store) */ | ||
| 53 | #define PM_L3_PREF_ALL 0x4e052 | ||
| 54 | /* Data PTEG reload */ | ||
| 55 | #define PM_DTLB_MISS 0x300fc | ||
| 56 | /* ITLB Reloaded */ | ||
| 57 | #define PM_ITLB_MISS 0x400fc | ||
| 58 | |||
| 28 | 59 | ||
| 29 | /* | 60 | /* |
| 30 | * Raw event encoding for POWER8: | 61 | * Raw event encoding for POWER8: |
| @@ -557,6 +588,8 @@ static int power8_generic_events[] = { | |||
| 557 | [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, | 588 | [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, |
| 558 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, | 589 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, |
| 559 | [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, | 590 | [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, |
| 591 | [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, | ||
| 592 | [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, | ||
| 560 | }; | 593 | }; |
| 561 | 594 | ||
| 562 | static u64 power8_bhrb_filter_map(u64 branch_sample_type) | 595 | static u64 power8_bhrb_filter_map(u64 branch_sample_type) |
| @@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter) | |||
| 596 | mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); | 629 | mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); |
| 597 | } | 630 | } |
| 598 | 631 | ||
| 632 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
| 633 | |||
| 634 | /* | ||
| 635 | * Table of generalized cache-related events. | ||
| 636 | * 0 means not supported, -1 means nonsensical, other values | ||
| 637 | * are event codes. | ||
| 638 | */ | ||
| 639 | static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | ||
| 640 | [ C(L1D) ] = { | ||
| 641 | [ C(OP_READ) ] = { | ||
| 642 | [ C(RESULT_ACCESS) ] = PM_LD_REF_L1, | ||
| 643 | [ C(RESULT_MISS) ] = PM_LD_MISS_L1, | ||
| 644 | }, | ||
| 645 | [ C(OP_WRITE) ] = { | ||
| 646 | [ C(RESULT_ACCESS) ] = 0, | ||
| 647 | [ C(RESULT_MISS) ] = PM_ST_MISS_L1, | ||
| 648 | }, | ||
| 649 | [ C(OP_PREFETCH) ] = { | ||
| 650 | [ C(RESULT_ACCESS) ] = PM_L1_PREF, | ||
| 651 | [ C(RESULT_MISS) ] = 0, | ||
| 652 | }, | ||
| 653 | }, | ||
| 654 | [ C(L1I) ] = { | ||
| 655 | [ C(OP_READ) ] = { | ||
| 656 | [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1, | ||
| 657 | [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS, | ||
| 658 | }, | ||
| 659 | [ C(OP_WRITE) ] = { | ||
| 660 | [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE, | ||
| 661 | [ C(RESULT_MISS) ] = -1, | ||
| 662 | }, | ||
| 663 | [ C(OP_PREFETCH) ] = { | ||
| 664 | [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE, | ||
| 665 | [ C(RESULT_MISS) ] = 0, | ||
| 666 | }, | ||
| 667 | }, | ||
| 668 | [ C(LL) ] = { | ||
| 669 | [ C(OP_READ) ] = { | ||
| 670 | [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3, | ||
| 671 | [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS, | ||
| 672 | }, | ||
| 673 | [ C(OP_WRITE) ] = { | ||
| 674 | [ C(RESULT_ACCESS) ] = PM_L2_ST, | ||
| 675 | [ C(RESULT_MISS) ] = PM_L2_ST_MISS, | ||
| 676 | }, | ||
| 677 | [ C(OP_PREFETCH) ] = { | ||
| 678 | [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL, | ||
| 679 | [ C(RESULT_MISS) ] = 0, | ||
| 680 | }, | ||
| 681 | }, | ||
| 682 | [ C(DTLB) ] = { | ||
| 683 | [ C(OP_READ) ] = { | ||
| 684 | [ C(RESULT_ACCESS) ] = 0, | ||
| 685 | [ C(RESULT_MISS) ] = PM_DTLB_MISS, | ||
| 686 | }, | ||
| 687 | [ C(OP_WRITE) ] = { | ||
| 688 | [ C(RESULT_ACCESS) ] = -1, | ||
| 689 | [ C(RESULT_MISS) ] = -1, | ||
| 690 | }, | ||
| 691 | [ C(OP_PREFETCH) ] = { | ||
| 692 | [ C(RESULT_ACCESS) ] = -1, | ||
| 693 | [ C(RESULT_MISS) ] = -1, | ||
| 694 | }, | ||
| 695 | }, | ||
| 696 | [ C(ITLB) ] = { | ||
| 697 | [ C(OP_READ) ] = { | ||
| 698 | [ C(RESULT_ACCESS) ] = 0, | ||
| 699 | [ C(RESULT_MISS) ] = PM_ITLB_MISS, | ||
| 700 | }, | ||
| 701 | [ C(OP_WRITE) ] = { | ||
| 702 | [ C(RESULT_ACCESS) ] = -1, | ||
| 703 | [ C(RESULT_MISS) ] = -1, | ||
| 704 | }, | ||
| 705 | [ C(OP_PREFETCH) ] = { | ||
| 706 | [ C(RESULT_ACCESS) ] = -1, | ||
| 707 | [ C(RESULT_MISS) ] = -1, | ||
| 708 | }, | ||
| 709 | }, | ||
| 710 | [ C(BPU) ] = { | ||
| 711 | [ C(OP_READ) ] = { | ||
| 712 | [ C(RESULT_ACCESS) ] = PM_BRU_FIN, | ||
| 713 | [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL, | ||
| 714 | }, | ||
| 715 | [ C(OP_WRITE) ] = { | ||
| 716 | [ C(RESULT_ACCESS) ] = -1, | ||
| 717 | [ C(RESULT_MISS) ] = -1, | ||
| 718 | }, | ||
| 719 | [ C(OP_PREFETCH) ] = { | ||
| 720 | [ C(RESULT_ACCESS) ] = -1, | ||
| 721 | [ C(RESULT_MISS) ] = -1, | ||
| 722 | }, | ||
| 723 | }, | ||
| 724 | [ C(NODE) ] = { | ||
| 725 | [ C(OP_READ) ] = { | ||
| 726 | [ C(RESULT_ACCESS) ] = -1, | ||
| 727 | [ C(RESULT_MISS) ] = -1, | ||
| 728 | }, | ||
| 729 | [ C(OP_WRITE) ] = { | ||
| 730 | [ C(RESULT_ACCESS) ] = -1, | ||
| 731 | [ C(RESULT_MISS) ] = -1, | ||
| 732 | }, | ||
| 733 | [ C(OP_PREFETCH) ] = { | ||
| 734 | [ C(RESULT_ACCESS) ] = -1, | ||
| 735 | [ C(RESULT_MISS) ] = -1, | ||
| 736 | }, | ||
| 737 | }, | ||
| 738 | }; | ||
| 739 | |||
| 740 | #undef C | ||
| 741 | |||
| 599 | static struct power_pmu power8_pmu = { | 742 | static struct power_pmu power8_pmu = { |
| 600 | .name = "POWER8", | 743 | .name = "POWER8", |
| 601 | .n_counter = 6, | 744 | .n_counter = 6, |
| @@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = { | |||
| 611 | .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, | 754 | .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, |
| 612 | .n_generic = ARRAY_SIZE(power8_generic_events), | 755 | .n_generic = ARRAY_SIZE(power8_generic_events), |
| 613 | .generic_events = power8_generic_events, | 756 | .generic_events = power8_generic_events, |
| 757 | .cache_events = &power8_cache_events, | ||
| 614 | .attr_groups = power8_pmu_attr_groups, | 758 | .attr_groups = power8_pmu_attr_groups, |
| 615 | .bhrb_nr = 32, | 759 | .bhrb_nr = 32, |
| 616 | }; | 760 | }; |
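With the table above wired into power8_pmu via .cache_events, a generalized cache event is resolved by indexing it with the three fields of the usual perf config encoding (cache id, operation, result; one byte each from lowest to highest), where 0 means the combination is not supported and -1 that it is nonsensical. A self-contained sketch of that lookup, using a cut-down table filled with raw codes taken from this patch:

	/* Sketch: resolving a PERF_TYPE_HW_CACHE-style config against a
	 * power8_cache_events-like table.  Only a few entries are shown. */
	#include <stdio.h>
	#include <stdint.h>

	enum { C_L1D, C_L1I, C_LL, C_DTLB, C_ITLB, C_BPU, C_NODE, C_MAX };
	enum { OP_READ, OP_WRITE, OP_PREFETCH, OP_MAX };
	enum { RES_ACCESS, RES_MISS, RES_MAX };

	static const int cache_events[C_MAX][OP_MAX][RES_MAX] = {
		[C_L1D][OP_READ]   = { 0x100ee /* PM_LD_REF_L1 */,
				       0x3e054 /* PM_LD_MISS_L1 */ },
		[C_L1D][OP_WRITE]  = { 0, 0x300f0 /* PM_ST_MISS_L1 */ },
		[C_DTLB][OP_WRITE] = { -1, -1 },
	};

	/* config is packed as: cache id | (op << 8) | (result << 16) */
	static int resolve(uint64_t config)
	{
		unsigned int id  = config & 0xff;
		unsigned int op  = (config >> 8) & 0xff;
		unsigned int res = (config >> 16) & 0xff;
		int ev;

		if (id >= C_MAX || op >= OP_MAX || res >= RES_MAX)
			return -1;
		ev = cache_events[id][op][res];
		return (ev == 0 || ev == -1) ? -1 : ev; /* unsupported or nonsensical */
	}

	int main(void)
	{
		printf("L1D read miss  -> %#x\n",
		       resolve(C_L1D | (OP_READ << 8) | (RES_MISS << 16)));
		printf("DTLB write hit -> %d\n",
		       resolve(C_DTLB | (OP_WRITE << 8) | (RES_ACCESS << 16)));
		return 0;
	}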
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 5ec1e47a0d77..e865d748179b 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c | |||
| @@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) | |||
| 123 | 123 | ||
| 124 | area->nid = nid; | 124 | area->nid = nid; |
| 125 | area->order = order; | 125 | area->order = order; |
| 126 | area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, | 126 | area->pages = alloc_pages_exact_node(area->nid, |
| 127 | GFP_KERNEL|__GFP_THISNODE, | ||
| 127 | area->order); | 128 | area->order); |
| 128 | 129 | ||
| 129 | if (!area->pages) { | 130 | if (!area->pages) { |
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index e1e71618b70c..253fefe3d1a0 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c | |||
| @@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb, | |||
| 44 | 44 | ||
| 45 | /* We simply send special EEH event */ | 45 | /* We simply send special EEH event */ |
| 46 | if ((changed_evts & OPAL_EVENT_PCI_ERROR) && | 46 | if ((changed_evts & OPAL_EVENT_PCI_ERROR) && |
| 47 | (events & OPAL_EVENT_PCI_ERROR)) | 47 | (events & OPAL_EVENT_PCI_ERROR) && |
| 48 | eeh_enabled()) | ||
| 48 | eeh_send_failure_event(NULL); | 49 | eeh_send_failure_event(NULL); |
| 49 | 50 | ||
| 50 | return 0; | 51 | return 0; |
| @@ -113,6 +114,7 @@ DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get, | |||
| 113 | ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); | 114 | ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); |
| 114 | #endif /* CONFIG_DEBUG_FS */ | 115 | #endif /* CONFIG_DEBUG_FS */ |
| 115 | 116 | ||
| 117 | |||
| 116 | /** | 118 | /** |
| 117 | * ioda_eeh_post_init - Chip dependent post initialization | 119 | * ioda_eeh_post_init - Chip dependent post initialization |
| 118 | * @hose: PCI controller | 120 | * @hose: PCI controller |
| @@ -220,6 +222,22 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option) | |||
| 220 | return ret; | 222 | return ret; |
| 221 | } | 223 | } |
| 222 | 224 | ||
| 225 | static void ioda_eeh_phb_diag(struct pci_controller *hose) | ||
| 226 | { | ||
| 227 | struct pnv_phb *phb = hose->private_data; | ||
| 228 | long rc; | ||
| 229 | |||
| 230 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, | ||
| 231 | PNV_PCI_DIAG_BUF_SIZE); | ||
| 232 | if (rc != OPAL_SUCCESS) { | ||
| 233 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", | ||
| 234 | __func__, hose->global_number, rc); | ||
| 235 | return; | ||
| 236 | } | ||
| 237 | |||
| 238 | pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); | ||
| 239 | } | ||
| 240 | |||
| 223 | /** | 241 | /** |
| 224 | * ioda_eeh_get_state - Retrieve the state of PE | 242 | * ioda_eeh_get_state - Retrieve the state of PE |
| 225 | * @pe: EEH PE | 243 | * @pe: EEH PE |
| @@ -271,6 +289,9 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) | |||
| 271 | result |= EEH_STATE_DMA_ACTIVE; | 289 | result |= EEH_STATE_DMA_ACTIVE; |
| 272 | result |= EEH_STATE_MMIO_ENABLED; | 290 | result |= EEH_STATE_MMIO_ENABLED; |
| 273 | result |= EEH_STATE_DMA_ENABLED; | 291 | result |= EEH_STATE_DMA_ENABLED; |
| 292 | } else if (!(pe->state & EEH_PE_ISOLATED)) { | ||
| 293 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
| 294 | ioda_eeh_phb_diag(hose); | ||
| 274 | } | 295 | } |
| 275 | 296 | ||
| 276 | return result; | 297 | return result; |
| @@ -314,6 +335,15 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) | |||
| 314 | __func__, fstate, hose->global_number, pe_no); | 335 | __func__, fstate, hose->global_number, pe_no); |
| 315 | } | 336 | } |
| 316 | 337 | ||
| 338 | /* Dump PHB diag-data for frozen PE */ | ||
| 339 | if (result != EEH_STATE_NOT_SUPPORT && | ||
| 340 | (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) != | ||
| 341 | (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) && | ||
| 342 | !(pe->state & EEH_PE_ISOLATED)) { | ||
| 343 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
| 344 | ioda_eeh_phb_diag(hose); | ||
| 345 | } | ||
| 346 | |||
| 317 | return result; | 347 | return result; |
| 318 | } | 348 | } |
| 319 | 349 | ||
| @@ -489,8 +519,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose, | |||
| 489 | static int ioda_eeh_reset(struct eeh_pe *pe, int option) | 519 | static int ioda_eeh_reset(struct eeh_pe *pe, int option) |
| 490 | { | 520 | { |
| 491 | struct pci_controller *hose = pe->phb; | 521 | struct pci_controller *hose = pe->phb; |
| 492 | struct eeh_dev *edev; | 522 | struct pci_bus *bus; |
| 493 | struct pci_dev *dev; | ||
| 494 | int ret; | 523 | int ret; |
| 495 | 524 | ||
| 496 | /* | 525 | /* |
| @@ -519,73 +548,17 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) | |||
| 519 | if (pe->type & EEH_PE_PHB) { | 548 | if (pe->type & EEH_PE_PHB) { |
| 520 | ret = ioda_eeh_phb_reset(hose, option); | 549 | ret = ioda_eeh_phb_reset(hose, option); |
| 521 | } else { | 550 | } else { |
| 522 | if (pe->type & EEH_PE_DEVICE) { | 551 | bus = eeh_pe_bus_get(pe); |
| 523 | /* | 552 | if (pci_is_root_bus(bus)) |
| 524 | * If it's device PE, we didn't refer to the parent | ||
| 525 | * PCI bus yet. So we have to figure it out indirectly. | ||
| 526 | */ | ||
| 527 | edev = list_first_entry(&pe->edevs, | ||
| 528 | struct eeh_dev, list); | ||
| 529 | dev = eeh_dev_to_pci_dev(edev); | ||
| 530 | dev = dev->bus->self; | ||
| 531 | } else { | ||
| 532 | /* | ||
| 533 | * If it's bus PE, the parent PCI bus is already there | ||
| 534 | * and just pick it up. | ||
| 535 | */ | ||
| 536 | dev = pe->bus->self; | ||
| 537 | } | ||
| 538 | |||
| 539 | /* | ||
| 540 | * Do reset based on the fact that the direct upstream bridge | ||
| 541 | * is root bridge (port) or not. | ||
| 542 | */ | ||
| 543 | if (dev->bus->number == 0) | ||
| 544 | ret = ioda_eeh_root_reset(hose, option); | 553 | ret = ioda_eeh_root_reset(hose, option); |
| 545 | else | 554 | else |
| 546 | ret = ioda_eeh_bridge_reset(hose, dev, option); | 555 | ret = ioda_eeh_bridge_reset(hose, bus->self, option); |
| 547 | } | 556 | } |
| 548 | 557 | ||
| 549 | return ret; | 558 | return ret; |
| 550 | } | 559 | } |
| 551 | 560 | ||
| 552 | /** | 561 | /** |
| 553 | * ioda_eeh_get_log - Retrieve error log | ||
| 554 | * @pe: EEH PE | ||
| 555 | * @severity: Severity level of the log | ||
| 556 | * @drv_log: buffer to store the log | ||
| 557 | * @len: space of the log buffer | ||
| 558 | * | ||
| 559 | * The function is used to retrieve error log from P7IOC. | ||
| 560 | */ | ||
| 561 | static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, | ||
| 562 | char *drv_log, unsigned long len) | ||
| 563 | { | ||
| 564 | s64 ret; | ||
| 565 | unsigned long flags; | ||
| 566 | struct pci_controller *hose = pe->phb; | ||
| 567 | struct pnv_phb *phb = hose->private_data; | ||
| 568 | |||
| 569 | spin_lock_irqsave(&phb->lock, flags); | ||
| 570 | |||
| 571 | ret = opal_pci_get_phb_diag_data2(phb->opal_id, | ||
| 572 | phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); | ||
| 573 | if (ret) { | ||
| 574 | spin_unlock_irqrestore(&phb->lock, flags); | ||
| 575 | pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n", | ||
| 576 | __func__, hose->global_number, pe->addr, ret); | ||
| 577 | return -EIO; | ||
| 578 | } | ||
| 579 | |||
| 580 | /* The PHB diag-data is always indicative */ | ||
| 581 | pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); | ||
| 582 | |||
| 583 | spin_unlock_irqrestore(&phb->lock, flags); | ||
| 584 | |||
| 585 | return 0; | ||
| 586 | } | ||
| 587 | |||
| 588 | /** | ||
| 589 | * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE | 562 | * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE |
| 590 | * @pe: EEH PE | 563 | * @pe: EEH PE |
| 591 | * | 564 | * |
| @@ -666,22 +639,6 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose) | |||
| 666 | } | 639 | } |
| 667 | } | 640 | } |
| 668 | 641 | ||
| 669 | static void ioda_eeh_phb_diag(struct pci_controller *hose) | ||
| 670 | { | ||
| 671 | struct pnv_phb *phb = hose->private_data; | ||
| 672 | long rc; | ||
| 673 | |||
| 674 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, | ||
| 675 | PNV_PCI_DIAG_BUF_SIZE); | ||
| 676 | if (rc != OPAL_SUCCESS) { | ||
| 677 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", | ||
| 678 | __func__, hose->global_number, rc); | ||
| 679 | return; | ||
| 680 | } | ||
| 681 | |||
| 682 | pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); | ||
| 683 | } | ||
| 684 | |||
| 685 | static int ioda_eeh_get_phb_pe(struct pci_controller *hose, | 642 | static int ioda_eeh_get_phb_pe(struct pci_controller *hose, |
| 686 | struct eeh_pe **pe) | 643 | struct eeh_pe **pe) |
| 687 | { | 644 | { |
| @@ -855,6 +812,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) | |||
| 855 | } | 812 | } |
| 856 | 813 | ||
| 857 | /* | 814 | /* |
| 815 | * EEH core will try to recover from a fenced PHB | ||
| 816 | * or frozen PE. For a frozen PE, EEH core enables | ||
| 817 | * the IO path before collecting logs, but doing so | ||
| 818 | * disturbs the error state on the PE. So we have to | ||
| 819 | * dump the log in advance here. | ||
| 820 | */ | ||
| 821 | if ((ret == EEH_NEXT_ERR_FROZEN_PE || | ||
| 822 | ret == EEH_NEXT_ERR_FENCED_PHB) && | ||
| 823 | !((*pe)->state & EEH_PE_ISOLATED)) { | ||
| 824 | eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); | ||
| 825 | ioda_eeh_phb_diag(hose); | ||
| 826 | } | ||
| 827 | |||
| 828 | /* | ||
| 858 | * If we have no errors on the specific PHB or only | 829 | * If we have no errors on the specific PHB or only |
| 859 | * informative error there, we continue poking it. | 830 | * informative error there, we continue poking it. |
| 860 | * Otherwise, we need actions to be taken by upper | 831 | * Otherwise, we need actions to be taken by upper |
| @@ -872,7 +843,6 @@ struct pnv_eeh_ops ioda_eeh_ops = { | |||
| 872 | .set_option = ioda_eeh_set_option, | 843 | .set_option = ioda_eeh_set_option, |
| 873 | .get_state = ioda_eeh_get_state, | 844 | .get_state = ioda_eeh_get_state, |
| 874 | .reset = ioda_eeh_reset, | 845 | .reset = ioda_eeh_reset, |
| 875 | .get_log = ioda_eeh_get_log, | ||
| 876 | .configure_bridge = ioda_eeh_configure_bridge, | 846 | .configure_bridge = ioda_eeh_configure_bridge, |
| 877 | .next_error = ioda_eeh_next_error | 847 | .next_error = ioda_eeh_next_error |
| 878 | }; | 848 | }; |
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index a79fddc5e74e..a59788e83b8b 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c | |||
| @@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) | |||
| 145 | * Enable EEH explicitly so that we will do EEH check | 145 | * Enable EEH explicitly so that we will do EEH check |
| 146 | * while accessing I/O stuff | 146 | * while accessing I/O stuff |
| 147 | */ | 147 | */ |
| 148 | eeh_subsystem_enabled = 1; | 148 | eeh_set_enable(true); |
| 149 | 149 | ||
| 150 | /* Save memory bars */ | 150 | /* Save memory bars */ |
| 151 | eeh_save_bars(edev); | 151 | eeh_save_bars(edev); |
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4fbf276ac99e..4cd2ea6c0dbe 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c | |||
| @@ -71,11 +71,11 @@ static int opal_xscom_err_xlate(int64_t rc) | |||
| 71 | } | 71 | } |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static u64 opal_scom_unmangle(u64 reg) | 74 | static u64 opal_scom_unmangle(u64 addr) |
| 75 | { | 75 | { |
| 76 | /* | 76 | /* |
| 77 | * XSCOM indirect addresses have the top bit set. Additionally | 77 | * XSCOM indirect addresses have the top bit set. Additionally |
| 78 | * the reset of the top 3 nibbles is always 0. | 78 | * the rest of the top 3 nibbles is always 0. |
| 79 | * | 79 | * |
| 80 | * Because the debugfs interface uses signed offsets and shifts | 80 | * Because the debugfs interface uses signed offsets and shifts |
| 81 | * the address left by 3, we basically cannot use the top 4 bits | 81 | * the address left by 3, we basically cannot use the top 4 bits |
| @@ -86,10 +86,13 @@ static u64 opal_scom_unmangle(u64 reg) | |||
| 86 | * conversion here. To leave room for further xscom address | 86 | * conversion here. To leave room for further xscom address |
| 87 | * expansion, we only clear out the top byte | 87 | * expansion, we only clear out the top byte |
| 88 | * | 88 | * |
| 89 | * For in-kernel use, we also support the real indirect bit, so | ||
| 90 | * we test for any of the top 5 bits | ||
| 91 | * | ||
| 89 | */ | 92 | */ |
| 90 | if (reg & (1ull << 59)) | 93 | if (addr & (0x1full << 59)) |
| 91 | reg = (reg & ~(0xffull << 56)) | (1ull << 63); | 94 | addr = (addr & ~(0xffull << 56)) | (1ull << 63); |
| 92 | return reg; | 95 | return addr; |
| 93 | } | 96 | } |
| 94 | 97 | ||
| 95 | static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | 98 | static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) |
| @@ -98,8 +101,8 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | |||
| 98 | int64_t rc; | 101 | int64_t rc; |
| 99 | __be64 v; | 102 | __be64 v; |
| 100 | 103 | ||
| 101 | reg = opal_scom_unmangle(reg); | 104 | reg = opal_scom_unmangle(m->addr + reg); |
| 102 | rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); | 105 | rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v)); |
| 103 | *value = be64_to_cpu(v); | 106 | *value = be64_to_cpu(v); |
| 104 | return opal_xscom_err_xlate(rc); | 107 | return opal_xscom_err_xlate(rc); |
| 105 | } | 108 | } |
| @@ -109,8 +112,8 @@ static int opal_scom_write(scom_map_t map, u64 reg, u64 value) | |||
| 109 | struct opal_scom_map *m = map; | 112 | struct opal_scom_map *m = map; |
| 110 | int64_t rc; | 113 | int64_t rc; |
| 111 | 114 | ||
| 112 | reg = opal_scom_unmangle(reg); | 115 | reg = opal_scom_unmangle(m->addr + reg); |
| 113 | rc = opal_xscom_write(m->chip, m->addr + reg, value); | 116 | rc = opal_xscom_write(m->chip, reg, value); |
| 114 | return opal_xscom_err_xlate(rc); | 117 | return opal_xscom_err_xlate(rc); |
| 115 | } | 118 | } |
| 116 | 119 | ||
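Besides renaming the parameter, the opal-xscom.c change folds the map base into the address before unmangling (opal_scom_unmangle(m->addr + reg) rather than adding the base afterwards) and widens the indirect test from bit 59 alone to any of the top five bits, so addresses that already carry the real indirect bit (bit 63) from in-kernel callers are also handled. A self-contained worked example reusing the function body from the patch:

	/* Sketch: what opal_scom_unmangle() does to a few sample addresses. */
	#include <stdio.h>
	#include <stdint.h>

	static uint64_t opal_scom_unmangle(uint64_t addr)
	{
		/* Any of the top 5 bits set: clear the top byte and set the
		 * real XSCOM indirect bit (bit 63). */
		if (addr & (0x1full << 59))
			addr = (addr & ~(0xffull << 56)) | (1ull << 63);
		return addr;
	}

	int main(void)
	{
		/* Debugfs-style mangled indirect address (bit 59 instead of 63) */
		printf("%016llx -> %016llx\n", 0x0800000012345678ull,
		       (unsigned long long)opal_scom_unmangle(0x0800000012345678ull));
		/* Address that already carries the real indirect bit */
		printf("%016llx -> %016llx\n", 0x8000000012345678ull,
		       (unsigned long long)opal_scom_unmangle(0x8000000012345678ull));
		/* Plain direct address passes through untouched */
		printf("%016llx -> %016llx\n", 0x0000000012345678ull,
		       (unsigned long long)opal_scom_unmangle(0x0000000012345678ull));
		return 0;
	}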
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7d6dcc6d5fa9..3b2b4fb3585b 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
| 22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
| 23 | #include <linux/msi.h> | 23 | #include <linux/msi.h> |
| 24 | #include <linux/memblock.h> | ||
| 24 | 25 | ||
| 25 | #include <asm/sections.h> | 26 | #include <asm/sections.h> |
| 26 | #include <asm/io.h> | 27 | #include <asm/io.h> |
| @@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev | |||
| 460 | return; | 461 | return; |
| 461 | 462 | ||
| 462 | pe = &phb->ioda.pe_array[pdn->pe_number]; | 463 | pe = &phb->ioda.pe_array[pdn->pe_number]; |
| 464 | WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); | ||
| 463 | set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); | 465 | set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); |
| 464 | } | 466 | } |
| 465 | 467 | ||
| 468 | static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, | ||
| 469 | struct pci_dev *pdev, u64 dma_mask) | ||
| 470 | { | ||
| 471 | struct pci_dn *pdn = pci_get_pdn(pdev); | ||
| 472 | struct pnv_ioda_pe *pe; | ||
| 473 | uint64_t top; | ||
| 474 | bool bypass = false; | ||
| 475 | |||
| 476 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | ||
| 477 | return -ENODEV; | ||
| 478 | |||
| 479 | pe = &phb->ioda.pe_array[pdn->pe_number]; | ||
| 480 | if (pe->tce_bypass_enabled) { | ||
| 481 | top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; | ||
| 482 | bypass = (dma_mask >= top); | ||
| 483 | } | ||
| 484 | |||
| 485 | if (bypass) { | ||
| 486 | dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); | ||
| 487 | set_dma_ops(&pdev->dev, &dma_direct_ops); | ||
| 488 | set_dma_offset(&pdev->dev, pe->tce_bypass_base); | ||
| 489 | } else { | ||
| 490 | dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); | ||
| 491 | set_dma_ops(&pdev->dev, &dma_iommu_ops); | ||
| 492 | set_iommu_table_base(&pdev->dev, &pe->tce32_table); | ||
| 493 | } | ||
| 494 | return 0; | ||
| 495 | } | ||
| 496 | |||
| 466 | static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) | 497 | static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) |
| 467 | { | 498 | { |
| 468 | struct pci_dev *dev; | 499 | struct pci_dev *dev; |
| @@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, | |||
| 657 | __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); | 688 | __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); |
| 658 | } | 689 | } |
| 659 | 690 | ||
| 691 | static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) | ||
| 692 | { | ||
| 693 | struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, | ||
| 694 | tce32_table); | ||
| 695 | uint16_t window_id = (pe->pe_number << 1) + 1; | ||
| 696 | int64_t rc; | ||
| 697 | |||
| 698 | pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); | ||
| 699 | if (enable) { | ||
| 700 | phys_addr_t top = memblock_end_of_DRAM(); | ||
| 701 | |||
| 702 | top = roundup_pow_of_two(top); | ||
| 703 | rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, | ||
| 704 | pe->pe_number, | ||
| 705 | window_id, | ||
| 706 | pe->tce_bypass_base, | ||
| 707 | top); | ||
| 708 | } else { | ||
| 709 | rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, | ||
| 710 | pe->pe_number, | ||
| 711 | window_id, | ||
| 712 | pe->tce_bypass_base, | ||
| 713 | 0); | ||
| 714 | |||
| 715 | /* | ||
| 716 | * We might want to reset the DMA ops of all devices on | ||
| 717 | * this PE. However in theory, that shouldn't be necessary | ||
| 718 | * as this is used for VFIO/KVM pass-through and the device | ||
| 719 | * hasn't yet been returned to its kernel driver | ||
| 720 | */ | ||
| 721 | } | ||
| 722 | if (rc) | ||
| 723 | pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); | ||
| 724 | else | ||
| 725 | pe->tce_bypass_enabled = enable; | ||
| 726 | } | ||
| 727 | |||
| 728 | static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb, | ||
| 729 | struct pnv_ioda_pe *pe) | ||
| 730 | { | ||
| 731 | /* TVE #1 is selected by PCI address bit 59 */ | ||
| 732 | pe->tce_bypass_base = 1ull << 59; | ||
| 733 | |||
| 734 | /* Install set_bypass callback for VFIO */ | ||
| 735 | pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass; | ||
| 736 | |||
| 737 | /* Enable bypass by default */ | ||
| 738 | pnv_pci_ioda2_set_bypass(&pe->tce32_table, true); | ||
| 739 | } | ||
| 740 | |||
| 660 | static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, | 741 | static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, |
| 661 | struct pnv_ioda_pe *pe) | 742 | struct pnv_ioda_pe *pe) |
| 662 | { | 743 | { |
| @@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, | |||
| 727 | else | 808 | else |
| 728 | pnv_ioda_setup_bus_dma(pe, pe->pbus); | 809 | pnv_ioda_setup_bus_dma(pe, pe->pbus); |
| 729 | 810 | ||
| 811 | /* Also create a bypass window */ | ||
| 812 | pnv_pci_ioda2_setup_bypass_pe(phb, pe); | ||
| 730 | return; | 813 | return; |
| 731 | fail: | 814 | fail: |
| 732 | if (pe->tce32_seg >= 0) | 815 | if (pe->tce32_seg >= 0) |
| @@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
| 1286 | 1369 | ||
| 1287 | /* Setup TCEs */ | 1370 | /* Setup TCEs */ |
| 1288 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; | 1371 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; |
| 1372 | phb->dma_set_mask = pnv_pci_ioda_dma_set_mask; | ||
| 1289 | 1373 | ||
| 1290 | /* Setup shutdown function for kexec */ | 1374 | /* Setup shutdown function for kexec */ |
| 1291 | phb->shutdown = pnv_pci_ioda_shutdown; | 1375 | phb->shutdown = pnv_pci_ioda_shutdown; |
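The bypass decision in pnv_pci_ioda_dma_set_mask() above reduces to one comparison: a device is switched to direct DMA only if its mask covers the whole bypass window, i.e. tce_bypass_base plus the top of RAM. With the base fixed at bit 59 by the TVE #1 selection, the arithmetic for a hypothetical machine with 16 GiB of RAM (the RAM size is purely illustrative) works out as:

	tce_bypass_base        = 1ull << 59      = 0x0800_0000_0000_0000
	memblock_end_of_DRAM() = 16 GiB          = 0x0000_0004_0000_0000
	top                    = base + DRAM - 1 = 0x0800_0003_ffff_ffff

	DMA_BIT_MASK(64) = 0xffff_ffff_ffff_ffff >= top -> bypass, dma_direct_ops + offset
	DMA_BIT_MASK(48) = 0x0000_ffff_ffff_ffff <  top -> 32-bit TCE table via dma_iommu_ops
	DMA_BIT_MASK(32) = 0x0000_0000_ffff_ffff <  top -> 32-bit TCE table via dma_iommu_ops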
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index b555ebc57ef5..8518817dcdfd 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
| @@ -134,57 +134,72 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, | |||
| 134 | pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n", | 134 | pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n", |
| 135 | hose->global_number, common->version); | 135 | hose->global_number, common->version); |
| 136 | 136 | ||
| 137 | pr_info(" brdgCtl: %08x\n", data->brdgCtl); | 137 | if (data->brdgCtl) |
| 138 | 138 | pr_info(" brdgCtl: %08x\n", | |
| 139 | pr_info(" portStatusReg: %08x\n", data->portStatusReg); | 139 | data->brdgCtl); |
| 140 | pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); | 140 | if (data->portStatusReg || data->rootCmplxStatus || |
| 141 | pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); | 141 | data->busAgentStatus) |
| 142 | 142 | pr_info(" UtlSts: %08x %08x %08x\n", | |
| 143 | pr_info(" deviceStatus: %08x\n", data->deviceStatus); | 143 | data->portStatusReg, data->rootCmplxStatus, |
| 144 | pr_info(" slotStatus: %08x\n", data->slotStatus); | 144 | data->busAgentStatus); |
| 145 | pr_info(" linkStatus: %08x\n", data->linkStatus); | 145 | if (data->deviceStatus || data->slotStatus || |
| 146 | pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); | 146 | data->linkStatus || data->devCmdStatus || |
| 147 | pr_info(" devSecStatus: %08x\n", data->devSecStatus); | 147 | data->devSecStatus) |
| 148 | 148 | pr_info(" RootSts: %08x %08x %08x %08x %08x\n", | |
| 149 | pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); | 149 | data->deviceStatus, data->slotStatus, |
| 150 | pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); | 150 | data->linkStatus, data->devCmdStatus, |
| 151 | pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); | 151 | data->devSecStatus); |
| 152 | pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); | 152 | if (data->rootErrorStatus || data->uncorrErrorStatus || |
| 153 | pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); | 153 | data->corrErrorStatus) |
| 154 | pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); | 154 | pr_info(" RootErrSts: %08x %08x %08x\n", |
| 155 | pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); | 155 | data->rootErrorStatus, data->uncorrErrorStatus, |
| 156 | pr_info(" sourceId: %08x\n", data->sourceId); | 156 | data->corrErrorStatus); |
| 157 | pr_info(" errorClass: %016llx\n", data->errorClass); | 157 | if (data->tlpHdr1 || data->tlpHdr2 || |
| 158 | pr_info(" correlator: %016llx\n", data->correlator); | 158 | data->tlpHdr3 || data->tlpHdr4) |
| 159 | pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr); | 159 | pr_info(" RootErrLog: %08x %08x %08x %08x\n", |
| 160 | pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr); | 160 | data->tlpHdr1, data->tlpHdr2, |
| 161 | pr_info(" lemFir: %016llx\n", data->lemFir); | 161 | data->tlpHdr3, data->tlpHdr4); |
| 162 | pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); | 162 | if (data->sourceId || data->errorClass || |
| 163 | pr_info(" lemWOF: %016llx\n", data->lemWOF); | 163 | data->correlator) |
| 164 | pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); | 164 | pr_info(" RootErrLog1: %08x %016llx %016llx\n", |
| 165 | pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); | 165 | data->sourceId, data->errorClass, |
| 166 | pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); | 166 | data->correlator); |
| 167 | pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); | 167 | if (data->p7iocPlssr || data->p7iocCsr) |
| 168 | pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); | 168 | pr_info(" PhbSts: %016llx %016llx\n", |
| 169 | pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); | 169 | data->p7iocPlssr, data->p7iocCsr); |
| 170 | pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); | 170 | if (data->lemFir || data->lemErrorMask || |
| 171 | pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); | 171 | data->lemWOF) |
| 172 | pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); | 172 | pr_info(" Lem: %016llx %016llx %016llx\n", |
| 173 | pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); | 173 | data->lemFir, data->lemErrorMask, |
| 174 | pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); | 174 | data->lemWOF); |
| 175 | pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); | 175 | if (data->phbErrorStatus || data->phbFirstErrorStatus || |
| 176 | pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); | 176 | data->phbErrorLog0 || data->phbErrorLog1) |
| 177 | pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); | 177 | pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", |
| 178 | pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); | 178 | data->phbErrorStatus, data->phbFirstErrorStatus, |
| 179 | pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); | 179 | data->phbErrorLog0, data->phbErrorLog1); |
| 180 | if (data->mmioErrorStatus || data->mmioFirstErrorStatus || | ||
| 181 | data->mmioErrorLog0 || data->mmioErrorLog1) | ||
| 182 | pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", | ||
| 183 | data->mmioErrorStatus, data->mmioFirstErrorStatus, | ||
| 184 | data->mmioErrorLog0, data->mmioErrorLog1); | ||
| 185 | if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || | ||
| 186 | data->dma0ErrorLog0 || data->dma0ErrorLog1) | ||
| 187 | pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", | ||
| 188 | data->dma0ErrorStatus, data->dma0FirstErrorStatus, | ||
| 189 | data->dma0ErrorLog0, data->dma0ErrorLog1); | ||
| 190 | if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || | ||
| 191 | data->dma1ErrorLog0 || data->dma1ErrorLog1) | ||
| 192 | pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", | ||
| 193 | data->dma1ErrorStatus, data->dma1FirstErrorStatus, | ||
| 194 | data->dma1ErrorLog0, data->dma1ErrorLog1); | ||
| 180 | 195 | ||
| 181 | for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { | 196 | for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { |
| 182 | if ((data->pestA[i] >> 63) == 0 && | 197 | if ((data->pestA[i] >> 63) == 0 && |
| 183 | (data->pestB[i] >> 63) == 0) | 198 | (data->pestB[i] >> 63) == 0) |
| 184 | continue; | 199 | continue; |
| 185 | 200 | ||
| 186 | pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); | 201 | pr_info(" PE[%3d] A/B: %016llx %016llx\n", |
| 187 | pr_info(" PESTB: %016llx\n", data->pestB[i]); | 202 | i, data->pestA[i], data->pestB[i]); |
| 188 | } | 203 | } |
| 189 | } | 204 | } |
| 190 | 205 | ||
| @@ -197,62 +212,77 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, | |||
| 197 | data = (struct OpalIoPhb3ErrorData*)common; | 212 | data = (struct OpalIoPhb3ErrorData*)common; |
| 198 | pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n", | 213 | pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n", |
| 199 | hose->global_number, common->version); | 214 | hose->global_number, common->version); |
| 200 | 215 | if (data->brdgCtl) | |
| 201 | pr_info(" brdgCtl: %08x\n", data->brdgCtl); | 216 | pr_info(" brdgCtl: %08x\n", |
| 202 | 217 | data->brdgCtl); | |
| 203 | pr_info(" portStatusReg: %08x\n", data->portStatusReg); | 218 | if (data->portStatusReg || data->rootCmplxStatus || |
| 204 | pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); | 219 | data->busAgentStatus) |
| 205 | pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); | 220 | pr_info(" UtlSts: %08x %08x %08x\n", |
| 206 | 221 | data->portStatusReg, data->rootCmplxStatus, | |
| 207 | pr_info(" deviceStatus: %08x\n", data->deviceStatus); | 222 | data->busAgentStatus); |
| 208 | pr_info(" slotStatus: %08x\n", data->slotStatus); | 223 | if (data->deviceStatus || data->slotStatus || |
| 209 | pr_info(" linkStatus: %08x\n", data->linkStatus); | 224 | data->linkStatus || data->devCmdStatus || |
| 210 | pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); | 225 | data->devSecStatus) |
| 211 | pr_info(" devSecStatus: %08x\n", data->devSecStatus); | 226 | pr_info(" RootSts: %08x %08x %08x %08x %08x\n", |
| 212 | 227 | data->deviceStatus, data->slotStatus, | |
| 213 | pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); | 228 | data->linkStatus, data->devCmdStatus, |
| 214 | pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); | 229 | data->devSecStatus); |
| 215 | pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); | 230 | if (data->rootErrorStatus || data->uncorrErrorStatus || |
| 216 | pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); | 231 | data->corrErrorStatus) |
| 217 | pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); | 232 | pr_info(" RootErrSts: %08x %08x %08x\n", |
| 218 | pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); | 233 | data->rootErrorStatus, data->uncorrErrorStatus, |
| 219 | pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); | 234 | data->corrErrorStatus); |
| 220 | pr_info(" sourceId: %08x\n", data->sourceId); | 235 | if (data->tlpHdr1 || data->tlpHdr2 || |
| 221 | pr_info(" errorClass: %016llx\n", data->errorClass); | 236 | data->tlpHdr3 || data->tlpHdr4) |
| 222 | pr_info(" correlator: %016llx\n", data->correlator); | 237 | pr_info(" RootErrLog: %08x %08x %08x %08x\n", |
| 223 | 238 | data->tlpHdr1, data->tlpHdr2, | |
| 224 | pr_info(" nFir: %016llx\n", data->nFir); | 239 | data->tlpHdr3, data->tlpHdr4); |
| 225 | pr_info(" nFirMask: %016llx\n", data->nFirMask); | 240 | if (data->sourceId || data->errorClass || |
| 226 | pr_info(" nFirWOF: %016llx\n", data->nFirWOF); | 241 | data->correlator) |
| 227 | pr_info(" PhbPlssr: %016llx\n", data->phbPlssr); | 242 | pr_info(" RootErrLog1: %08x %016llx %016llx\n", |
| 228 | pr_info(" PhbCsr: %016llx\n", data->phbCsr); | 243 | data->sourceId, data->errorClass, |
| 229 | pr_info(" lemFir: %016llx\n", data->lemFir); | 244 | data->correlator); |
| 230 | pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); | 245 | if (data->nFir || data->nFirMask || |
| 231 | pr_info(" lemWOF: %016llx\n", data->lemWOF); | 246 | data->nFirWOF) |
| 232 | pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); | 247 | pr_info(" nFir: %016llx %016llx %016llx\n", |
| 233 | pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); | 248 | data->nFir, data->nFirMask, |
| 234 | pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); | 249 | data->nFirWOF); |
| 235 | pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); | 250 | if (data->phbPlssr || data->phbCsr) |
| 236 | pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); | 251 | pr_info(" PhbSts: %016llx %016llx\n", |
| 237 | pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); | 252 | data->phbPlssr, data->phbCsr); |
| 238 | pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); | 253 | if (data->lemFir || data->lemErrorMask || |
| 239 | pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); | 254 | data->lemWOF) |
| 240 | pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); | 255 | pr_info(" Lem: %016llx %016llx %016llx\n", |
| 241 | pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); | 256 | data->lemFir, data->lemErrorMask, |
| 242 | pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); | 257 | data->lemWOF); |
| 243 | pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); | 258 | if (data->phbErrorStatus || data->phbFirstErrorStatus || |
| 244 | pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); | 259 | data->phbErrorLog0 || data->phbErrorLog1) |
| 245 | pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); | 260 | pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", |
| 246 | pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); | 261 | data->phbErrorStatus, data->phbFirstErrorStatus, |
| 247 | pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); | 262 | data->phbErrorLog0, data->phbErrorLog1); |
| 263 | if (data->mmioErrorStatus || data->mmioFirstErrorStatus || | ||
| 264 | data->mmioErrorLog0 || data->mmioErrorLog1) | ||
| 265 | pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", | ||
| 266 | data->mmioErrorStatus, data->mmioFirstErrorStatus, | ||
| 267 | data->mmioErrorLog0, data->mmioErrorLog1); | ||
| 268 | if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || | ||
| 269 | data->dma0ErrorLog0 || data->dma0ErrorLog1) | ||
| 270 | pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", | ||
| 271 | data->dma0ErrorStatus, data->dma0FirstErrorStatus, | ||
| 272 | data->dma0ErrorLog0, data->dma0ErrorLog1); | ||
| 273 | if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || | ||
| 274 | data->dma1ErrorLog0 || data->dma1ErrorLog1) | ||
| 275 | pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", | ||
| 276 | data->dma1ErrorStatus, data->dma1FirstErrorStatus, | ||
| 277 | data->dma1ErrorLog0, data->dma1ErrorLog1); | ||
| 248 | 278 | ||
| 249 | for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { | 279 | for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { |
| 250 | if ((data->pestA[i] >> 63) == 0 && | 280 | if ((data->pestA[i] >> 63) == 0 && |
| 251 | (data->pestB[i] >> 63) == 0) | 281 | (data->pestB[i] >> 63) == 0) |
| 252 | continue; | 282 | continue; |
| 253 | 283 | ||
| 254 | pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); | 284 | pr_info(" PE[%3d] A/B: %016llx %016llx\n", |
| 255 | pr_info(" PESTB: %016llx\n", data->pestB[i]); | 285 | i, data->pestA[i], data->pestB[i]); |
| 256 | } | 286 | } |
| 257 | } | 287 | } |
| 258 | 288 | ||
| @@ -634,6 +664,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) | |||
| 634 | pnv_pci_dma_fallback_setup(hose, pdev); | 664 | pnv_pci_dma_fallback_setup(hose, pdev); |
| 635 | } | 665 | } |
| 636 | 666 | ||
| 667 | int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | ||
| 668 | { | ||
| 669 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); | ||
| 670 | struct pnv_phb *phb = hose->private_data; | ||
| 671 | |||
| 672 | if (phb && phb->dma_set_mask) | ||
| 673 | return phb->dma_set_mask(phb, pdev, dma_mask); | ||
| 674 | return __dma_set_mask(&pdev->dev, dma_mask); | ||
| 675 | } | ||
| 676 | |||
| 637 | void pnv_pci_shutdown(void) | 677 | void pnv_pci_shutdown(void) |
| 638 | { | 678 | { |
| 639 | struct pci_controller *hose; | 679 | struct pci_controller *hose; |
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 13f1942a9a5f..cde169442775 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
| @@ -54,7 +54,9 @@ struct pnv_ioda_pe { | |||
| 54 | struct iommu_table tce32_table; | 54 | struct iommu_table tce32_table; |
| 55 | phys_addr_t tce_inval_reg_phys; | 55 | phys_addr_t tce_inval_reg_phys; |
| 56 | 56 | ||
| 57 | /* XXX TODO: Add support for additional 64-bit iommus */ | 57 | /* 64-bit TCE bypass region */ |
| 58 | bool tce_bypass_enabled; | ||
| 59 | uint64_t tce_bypass_base; | ||
| 58 | 60 | ||
| 59 | /* MSIs. MVE index is identical for for 32 and 64 bit MSI | 61 | /* MSIs. MVE index is identical for for 32 and 64 bit MSI |
| 60 | * and -1 if not supported. (It's actually identical to the | 62 | * and -1 if not supported. (It's actually identical to the |
| @@ -113,6 +115,8 @@ struct pnv_phb { | |||
| 113 | unsigned int hwirq, unsigned int virq, | 115 | unsigned int hwirq, unsigned int virq, |
| 114 | unsigned int is_64, struct msi_msg *msg); | 116 | unsigned int is_64, struct msi_msg *msg); |
| 115 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); | 117 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); |
| 118 | int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev, | ||
| 119 | u64 dma_mask); | ||
| 116 | void (*fixup_phb)(struct pci_controller *hose); | 120 | void (*fixup_phb)(struct pci_controller *hose); |
| 117 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); | 121 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); |
| 118 | void (*shutdown)(struct pnv_phb *phb); | 122 | void (*shutdown)(struct pnv_phb *phb); |
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index de6819be1f95..0051e108ef0f 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h | |||
| @@ -7,12 +7,20 @@ extern void pnv_smp_init(void); | |||
| 7 | static inline void pnv_smp_init(void) { } | 7 | static inline void pnv_smp_init(void) { } |
| 8 | #endif | 8 | #endif |
| 9 | 9 | ||
| 10 | struct pci_dev; | ||
| 11 | |||
| 10 | #ifdef CONFIG_PCI | 12 | #ifdef CONFIG_PCI |
| 11 | extern void pnv_pci_init(void); | 13 | extern void pnv_pci_init(void); |
| 12 | extern void pnv_pci_shutdown(void); | 14 | extern void pnv_pci_shutdown(void); |
| 15 | extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask); | ||
| 13 | #else | 16 | #else |
| 14 | static inline void pnv_pci_init(void) { } | 17 | static inline void pnv_pci_init(void) { } |
| 15 | static inline void pnv_pci_shutdown(void) { } | 18 | static inline void pnv_pci_shutdown(void) { } |
| 19 | |||
| 20 | static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | ||
| 21 | { | ||
| 22 | return -ENODEV; | ||
| 23 | } | ||
| 16 | #endif | 24 | #endif |
| 17 | 25 | ||
| 18 | extern void pnv_lpc_init(void); | 26 | extern void pnv_lpc_init(void); |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 21166f65c97c..110f4fbd319f 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
| 28 | #include <linux/bug.h> | 28 | #include <linux/bug.h> |
| 29 | #include <linux/cpuidle.h> | 29 | #include <linux/cpuidle.h> |
| 30 | #include <linux/pci.h> | ||
| 30 | 31 | ||
| 31 | #include <asm/machdep.h> | 32 | #include <asm/machdep.h> |
| 32 | #include <asm/firmware.h> | 33 | #include <asm/firmware.h> |
| @@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex) | |||
| 141 | { | 142 | { |
| 142 | } | 143 | } |
| 143 | 144 | ||
| 145 | static int pnv_dma_set_mask(struct device *dev, u64 dma_mask) | ||
| 146 | { | ||
| 147 | if (dev_is_pci(dev)) | ||
| 148 | return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask); | ||
| 149 | return __dma_set_mask(dev, dma_mask); | ||
| 150 | } | ||
| 151 | |||
| 144 | static void pnv_shutdown(void) | 152 | static void pnv_shutdown(void) |
| 145 | { | 153 | { |
| 146 | /* Let the PCI code clear up IODA tables */ | 154 | /* Let the PCI code clear up IODA tables */ |
| @@ -238,6 +246,7 @@ define_machine(powernv) { | |||
| 238 | .machine_shutdown = pnv_shutdown, | 246 | .machine_shutdown = pnv_shutdown, |
| 239 | .power_save = powernv_idle, | 247 | .power_save = powernv_idle, |
| 240 | .calibrate_decr = generic_calibrate_decr, | 248 | .calibrate_decr = generic_calibrate_decr, |
| 249 | .dma_set_mask = pnv_dma_set_mask, | ||
| 241 | #ifdef CONFIG_KEXEC | 250 | #ifdef CONFIG_KEXEC |
| 242 | .kexec_cpu_down = pnv_kexec_cpu_down, | 251 | .kexec_cpu_down = pnv_kexec_cpu_down, |
| 243 | #endif | 252 | #endif |
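Taken together, the powernv pieces above form one dispatch chain: the new ppc_md.dma_set_mask hook (pnv_dma_set_mask) forwards PCI devices to pnv_pci_dma_set_mask(), which calls phb->dma_set_mask (pnv_pci_ioda_dma_set_mask on IODA PHBs) and otherwise falls back to __dma_set_mask(). Drivers keep using the ordinary API; a sketch of probe-time usage, with the driver and function names purely hypothetical:

	/* Hypothetical PCI driver probe on powernv: asking for a 64-bit mask
	 * now lands the device on the TCE bypass window when RAM fits under it. */
	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int rc;

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (rc) {
			/* Fall back: device stays on the 32-bit TCE table */
			rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (rc)
				return rc;
		}
		/* ... normal resource setup continues here ... */
		return 0;
	}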
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 37300f6ee244..80b1d57c306a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
| @@ -20,6 +20,7 @@ config PPC_PSERIES | |||
| 20 | select PPC_DOORBELL | 20 | select PPC_DOORBELL |
| 21 | select HAVE_CONTEXT_TRACKING | 21 | select HAVE_CONTEXT_TRACKING |
| 22 | select HOTPLUG_CPU if SMP | 22 | select HOTPLUG_CPU if SMP |
| 23 | select ARCH_RANDOM | ||
| 23 | default y | 24 | default y |
| 24 | 25 | ||
| 25 | config PPC_SPLPAR | 26 | config PPC_SPLPAR |
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 9ef3cc8ebc11..8a8f0472d98f 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c | |||
| @@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) | |||
| 265 | enable = 1; | 265 | enable = 1; |
| 266 | 266 | ||
| 267 | if (enable) { | 267 | if (enable) { |
| 268 | eeh_subsystem_enabled = 1; | 268 | eeh_set_enable(true); |
| 269 | eeh_add_to_parent_pe(edev); | 269 | eeh_add_to_parent_pe(edev); |
| 270 | 270 | ||
| 271 | pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", | 271 | pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", |
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 82789e79e539..0ea99e3d4815 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
| @@ -35,12 +35,7 @@ | |||
| 35 | #include "offline_states.h" | 35 | #include "offline_states.h" |
| 36 | 36 | ||
| 37 | /* This version can't take the spinlock, because it never returns */ | 37 | /* This version can't take the spinlock, because it never returns */ |
| 38 | static struct rtas_args rtas_stop_self_args = { | 38 | static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE; |
| 39 | .token = RTAS_UNKNOWN_SERVICE, | ||
| 40 | .nargs = 0, | ||
| 41 | .nret = 1, | ||
| 42 | .rets = &rtas_stop_self_args.args[0], | ||
| 43 | }; | ||
| 44 | 39 | ||
| 45 | static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = | 40 | static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = |
| 46 | CPU_STATE_OFFLINE; | 41 | CPU_STATE_OFFLINE; |
| @@ -93,15 +88,20 @@ void set_default_offline_state(int cpu) | |||
| 93 | 88 | ||
| 94 | static void rtas_stop_self(void) | 89 | static void rtas_stop_self(void) |
| 95 | { | 90 | { |
| 96 | struct rtas_args *args = &rtas_stop_self_args; | 91 | struct rtas_args args = { |
| 92 | .token = cpu_to_be32(rtas_stop_self_token), | ||
| 93 | .nargs = 0, | ||
| 94 | .nret = 1, | ||
| 95 | .rets = &args.args[0], | ||
| 96 | }; | ||
| 97 | 97 | ||
| 98 | local_irq_disable(); | 98 | local_irq_disable(); |
| 99 | 99 | ||
| 100 | BUG_ON(args->token == RTAS_UNKNOWN_SERVICE); | 100 | BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); |
| 101 | 101 | ||
| 102 | printk("cpu %u (hwid %u) Ready to die...\n", | 102 | printk("cpu %u (hwid %u) Ready to die...\n", |
| 103 | smp_processor_id(), hard_smp_processor_id()); | 103 | smp_processor_id(), hard_smp_processor_id()); |
| 104 | enter_rtas(__pa(args)); | 104 | enter_rtas(__pa(&args)); |
| 105 | 105 | ||
| 106 | panic("Alas, I survived.\n"); | 106 | panic("Alas, I survived.\n"); |
| 107 | } | 107 | } |
| @@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void) | |||
| 392 | } | 392 | } |
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | rtas_stop_self_args.token = rtas_token("stop-self"); | 395 | rtas_stop_self_token = rtas_token("stop-self"); |
| 396 | qcss_tok = rtas_token("query-cpu-stopped-state"); | 396 | qcss_tok = rtas_token("query-cpu-stopped-state"); |
| 397 | 397 | ||
| 398 | if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE || | 398 | if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE || |
| 399 | qcss_tok == RTAS_UNKNOWN_SERVICE) { | 399 | qcss_tok == RTAS_UNKNOWN_SERVICE) { |
| 400 | printk(KERN_INFO "CPU Hotplug not supported by firmware " | 400 | printk(KERN_INFO "CPU Hotplug not supported by firmware " |
| 401 | "- disabling.\n"); | 401 | "- disabling.\n"); |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 70670a2d9cf2..c413ec158ff5 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
| @@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
| 113 | { | 113 | { |
| 114 | struct device_node *dn, *pdn; | 114 | struct device_node *dn, *pdn; |
| 115 | struct pci_bus *bus; | 115 | struct pci_bus *bus; |
| 116 | const __be32 *pcie_link_speed_stats; | 116 | u32 pcie_link_speed_stats[2]; |
| 117 | int rc; | ||
| 117 | 118 | ||
| 118 | bus = bridge->bus; | 119 | bus = bridge->bus; |
| 119 | 120 | ||
| @@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
| 122 | return 0; | 123 | return 0; |
| 123 | 124 | ||
| 124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { | 125 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { |
| 125 | pcie_link_speed_stats = of_get_property(pdn, | 126 | rc = of_property_read_u32_array(pdn, |
| 126 | "ibm,pcie-link-speed-stats", NULL); | 127 | "ibm,pcie-link-speed-stats", |
| 127 | if (pcie_link_speed_stats) | 128 | &pcie_link_speed_stats[0], 2); |
| 129 | if (!rc) | ||
| 128 | break; | 130 | break; |
| 129 | } | 131 | } |
| 130 | 132 | ||
| 131 | of_node_put(pdn); | 133 | of_node_put(pdn); |
| 132 | 134 | ||
| 133 | if (!pcie_link_speed_stats) { | 135 | if (rc) { |
| 134 | pr_err("no ibm,pcie-link-speed-stats property\n"); | 136 | pr_err("no ibm,pcie-link-speed-stats property\n"); |
| 135 | return 0; | 137 | return 0; |
| 136 | } | 138 | } |
| 137 | 139 | ||
| 138 | switch (be32_to_cpup(pcie_link_speed_stats)) { | 140 | switch (pcie_link_speed_stats[0]) { |
| 139 | case 0x01: | 141 | case 0x01: |
| 140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; | 142 | bus->max_bus_speed = PCIE_SPEED_2_5GT; |
| 141 | break; | 143 | break; |
| 142 | case 0x02: | 144 | case 0x02: |
| 143 | bus->max_bus_speed = PCIE_SPEED_5_0GT; | 145 | bus->max_bus_speed = PCIE_SPEED_5_0GT; |
| 144 | break; | 146 | break; |
| 147 | case 0x04: | ||
| 148 | bus->max_bus_speed = PCIE_SPEED_8_0GT; | ||
| 149 | break; | ||
| 145 | default: | 150 | default: |
| 146 | bus->max_bus_speed = PCI_SPEED_UNKNOWN; | 151 | bus->max_bus_speed = PCI_SPEED_UNKNOWN; |
| 147 | break; | 152 | break; |
| 148 | } | 153 | } |
| 149 | 154 | ||
| 150 | switch (be32_to_cpup(pcie_link_speed_stats)) { | 155 | switch (pcie_link_speed_stats[1]) { |
| 151 | case 0x01: | 156 | case 0x01: |
| 152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; | 157 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; |
| 153 | break; | 158 | break; |
| 154 | case 0x02: | 159 | case 0x02: |
| 155 | bus->cur_bus_speed = PCIE_SPEED_5_0GT; | 160 | bus->cur_bus_speed = PCIE_SPEED_5_0GT; |
| 156 | break; | 161 | break; |
| 162 | case 0x04: | ||
| 163 | bus->cur_bus_speed = PCIE_SPEED_8_0GT; | ||
| 164 | break; | ||
| 157 | default: | 165 | default: |
| 158 | bus->cur_bus_speed = PCI_SPEED_UNKNOWN; | 166 | bus->cur_bus_speed = PCI_SPEED_UNKNOWN; |
| 159 | break; | 167 | break; |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 8e639d7cbda7..972df0ffd4dc 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
| @@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image) | |||
| 430 | { | 430 | { |
| 431 | long rc; | 431 | long rc; |
| 432 | 432 | ||
| 433 | if (firmware_has_feature(FW_FEATURE_SET_MODE) && | 433 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
| 434 | (image->type != KEXEC_TYPE_CRASH)) { | ||
| 435 | rc = pSeries_disable_reloc_on_exc(); | 434 | rc = pSeries_disable_reloc_on_exc(); |
| 436 | if (rc != H_SUCCESS) | 435 | if (rc != H_SUCCESS) |
| 437 | pr_warning("Warning: Failed to disable relocation on " | 436 | pr_warning("Warning: Failed to disable relocation on " |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 0e166ed4cd16..8209744b2829 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
| @@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
| 886 | 886 | ||
| 887 | /* Default: read HW settings */ | 887 | /* Default: read HW settings */ |
| 888 | if (flow_type == IRQ_TYPE_DEFAULT) { | 888 | if (flow_type == IRQ_TYPE_DEFAULT) { |
| 889 | switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | | 889 | int vold_ps; |
| 890 | MPIC_INFO(VECPRI_SENSE_MASK))) { | 890 | |
| 891 | case MPIC_INFO(VECPRI_SENSE_EDGE) | | 891 | vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | |
| 892 | MPIC_INFO(VECPRI_POLARITY_POSITIVE): | 892 | MPIC_INFO(VECPRI_SENSE_MASK)); |
| 893 | flow_type = IRQ_TYPE_EDGE_RISING; | 893 | |
| 894 | break; | 894 | if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | |
| 895 | case MPIC_INFO(VECPRI_SENSE_EDGE) | | 895 | MPIC_INFO(VECPRI_POLARITY_POSITIVE))) |
| 896 | MPIC_INFO(VECPRI_POLARITY_NEGATIVE): | 896 | flow_type = IRQ_TYPE_EDGE_RISING; |
| 897 | flow_type = IRQ_TYPE_EDGE_FALLING; | 897 | else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) | |
| 898 | break; | 898 | MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) |
| 899 | case MPIC_INFO(VECPRI_SENSE_LEVEL) | | 899 | flow_type = IRQ_TYPE_EDGE_FALLING; |
| 900 | MPIC_INFO(VECPRI_POLARITY_POSITIVE): | 900 | else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | |
| 901 | flow_type = IRQ_TYPE_LEVEL_HIGH; | 901 | MPIC_INFO(VECPRI_POLARITY_POSITIVE))) |
| 902 | break; | 902 | flow_type = IRQ_TYPE_LEVEL_HIGH; |
| 903 | case MPIC_INFO(VECPRI_SENSE_LEVEL) | | 903 | else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) | |
| 904 | MPIC_INFO(VECPRI_POLARITY_NEGATIVE): | 904 | MPIC_INFO(VECPRI_POLARITY_NEGATIVE))) |
| 905 | flow_type = IRQ_TYPE_LEVEL_LOW; | 905 | flow_type = IRQ_TYPE_LEVEL_LOW; |
| 906 | break; | 906 | else |
| 907 | } | 907 | WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold); |
| 908 | } | 908 | } |
| 909 | 909 | ||
| 910 | /* Apply to irq desc */ | 910 | /* Apply to irq desc */ |
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index a90731b3d44a..b07909850f77 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
| @@ -309,16 +309,23 @@ static void get_output_lock(void) | |||
| 309 | 309 | ||
| 310 | if (xmon_speaker == me) | 310 | if (xmon_speaker == me) |
| 311 | return; | 311 | return; |
| 312 | |||
| 312 | for (;;) { | 313 | for (;;) { |
| 313 | if (xmon_speaker == 0) { | 314 | last_speaker = cmpxchg(&xmon_speaker, 0, me); |
| 314 | last_speaker = cmpxchg(&xmon_speaker, 0, me); | 315 | if (last_speaker == 0) |
| 315 | if (last_speaker == 0) | 316 | return; |
| 316 | return; | 317 | |
| 317 | } | 318 | /* |
| 318 | timeout = 10000000; | 319 | * Wait a full second for the lock, we might be on a slow |
| 320 | * console, but check every 100us. | ||
| 321 | */ | ||
| 322 | timeout = 10000; | ||
| 319 | while (xmon_speaker == last_speaker) { | 323 | while (xmon_speaker == last_speaker) { |
| 320 | if (--timeout > 0) | 324 | if (--timeout > 0) { |
| 325 | udelay(100); | ||
| 321 | continue; | 326 | continue; |
| 327 | } | ||
| 328 | |||
| 322 | /* hostile takeover */ | 329 | /* hostile takeover */ |
| 323 | prev = cmpxchg(&xmon_speaker, last_speaker, me); | 330 | prev = cmpxchg(&xmon_speaker, last_speaker, me); |
| 324 | if (prev == last_speaker) | 331 | if (prev == last_speaker) |
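The rewritten get_output_lock() loop keeps roughly the same one-second budget as before but makes it explicit: 10000 iterations x udelay(100) = 10000 x 100 us = 1,000,000 us = 1 s, whereas the previous 10,000,000 bare spins took a CPU-speed-dependent amount of time before attempting the hostile takeover.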
| @@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
| 397 | } | 404 | } |
| 398 | 405 | ||
| 399 | xmon_fault_jmp[cpu] = recurse_jmp; | 406 | xmon_fault_jmp[cpu] = recurse_jmp; |
| 400 | cpumask_set_cpu(cpu, &cpus_in_xmon); | ||
| 401 | 407 | ||
| 402 | bp = NULL; | 408 | bp = NULL; |
| 403 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) | 409 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) |
| @@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
| 419 | release_output_lock(); | 425 | release_output_lock(); |
| 420 | } | 426 | } |
| 421 | 427 | ||
| 428 | cpumask_set_cpu(cpu, &cpus_in_xmon); | ||
| 429 | |||
| 422 | waiting: | 430 | waiting: |
| 423 | secondary = 1; | 431 | secondary = 1; |
| 424 | while (secondary && !xmon_gate) { | 432 | while (secondary && !xmon_gate) { |
