diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-26 12:39:02 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-26 12:39:02 -0400 |
| commit | 919a6d10fdd9e256dfcd31937fb0b18d1c066be6 (patch) | |
| tree | 7bec333f70193007267601b4240684d43f3315bd /arch/powerpc | |
| parent | cf2acfb2051fc67804162eebc5ebc8f55d3b7e2c (diff) | |
| parent | fd0cca754f3f6756bfdafe500e4f49b1b9e9723f (diff) | |
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (29 commits)
powerpc/rtas: Fix watchdog driver temperature read functionality
powerpc/mm: Fix potential access to freed pages when using hugetlbfs
powerpc/440: Fix warning early debug code
powerpc/of: Fix usage of dev_set_name() in of_device_alloc()
powerpc/pasemi: Use raw spinlock in SMP TB sync
powerpc: Use one common impl. of RTAS timebase sync and use raw spinlock
powerpc/rtas: Turn rtas lock into a raw spinlock
powerpc: Add irqtrace support for 32-bit powerpc
powerpc/BSR: Fix BSR to allow mmap of small BSR on 64k kernel
powerpc/BSR: add 4096 byte BSR size
powerpc: Map more memory early on 601 processors
powerpc/pmac: Fix DMA ops for MacIO devices
powerpc/mm: Make k(un)map_atomic out of line
powerpc: Fix mpic alloc warning
powerpc: Fix output from show_regs
powerpc/pmac: Fix issues with PowerMac "PowerSurge" SMP
powerpc/amigaone: Limit ISA I/O range to 4k in the device tree
powerpc/warp: Platform fix for i2c change
powerpc: Have git ignore generated files from dtc compile
powerpc/mpic: Fix mapping of "DCR" based MPIC variants
...
Diffstat (limited to 'arch/powerpc')
33 files changed, 501 insertions, 347 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index bf6cedfa05db..d00131ca0835 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
| @@ -62,7 +62,6 @@ config HAVE_LATENCYTOP_SUPPORT | |||
| 62 | 62 | ||
| 63 | config TRACE_IRQFLAGS_SUPPORT | 63 | config TRACE_IRQFLAGS_SUPPORT |
| 64 | bool | 64 | bool |
| 65 | depends on PPC64 | ||
| 66 | default y | 65 | default y |
| 67 | 66 | ||
| 68 | config LOCKDEP_SUPPORT | 67 | config LOCKDEP_SUPPORT |
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore index 2f50acd11a60..3d80c3e9cf60 100644 --- a/arch/powerpc/boot/.gitignore +++ b/arch/powerpc/boot/.gitignore | |||
| @@ -36,3 +36,13 @@ zImage.pseries | |||
| 36 | zconf.h | 36 | zconf.h |
| 37 | zlib.h | 37 | zlib.h |
| 38 | zutil.h | 38 | zutil.h |
| 39 | fdt.c | ||
| 40 | fdt.h | ||
| 41 | fdt_ro.c | ||
| 42 | fdt_rw.c | ||
| 43 | fdt_strerror.c | ||
| 44 | fdt_sw.c | ||
| 45 | fdt_wip.c | ||
| 46 | libfdt.h | ||
| 47 | libfdt_internal.h | ||
| 48 | |||
diff --git a/arch/powerpc/boot/dts/amigaone.dts b/arch/powerpc/boot/dts/amigaone.dts index 26549fca2ed4..49ac36b16dd7 100644 --- a/arch/powerpc/boot/dts/amigaone.dts +++ b/arch/powerpc/boot/dts/amigaone.dts | |||
| @@ -70,8 +70,8 @@ | |||
| 70 | devsel-speed = <0x00000001>; | 70 | devsel-speed = <0x00000001>; |
| 71 | min-grant = <0>; | 71 | min-grant = <0>; |
| 72 | max-latency = <0>; | 72 | max-latency = <0>; |
| 73 | /* First 64k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */ | 73 | /* First 4k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */ |
| 74 | ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00010000>; | 74 | ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>; |
| 75 | interrupt-parent = <&i8259>; | 75 | interrupt-parent = <&i8259>; |
| 76 | #interrupt-cells = <2>; | 76 | #interrupt-cells = <2>; |
| 77 | #address-cells = <2>; | 77 | #address-cells = <2>; |
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts index a8dcb018c4a5..a680165292f2 100644 --- a/arch/powerpc/boot/dts/mpc8569mds.dts +++ b/arch/powerpc/boot/dts/mpc8569mds.dts | |||
| @@ -253,6 +253,7 @@ | |||
| 253 | /* Filled in by U-Boot */ | 253 | /* Filled in by U-Boot */ |
| 254 | clock-frequency = <0>; | 254 | clock-frequency = <0>; |
| 255 | status = "disabled"; | 255 | status = "disabled"; |
| 256 | sdhci,1-bit-only; | ||
| 256 | }; | 257 | }; |
| 257 | 258 | ||
| 258 | crypto@30000 { | 259 | crypto@30000 { |
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index 2ff798744c1d..7685ffde8821 100644 --- a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h | |||
| @@ -598,8 +598,6 @@ typedef struct risc_timer_pram { | |||
| 598 | #define CICR_IEN ((uint)0x00000080) /* Int. enable */ | 598 | #define CICR_IEN ((uint)0x00000080) /* Int. enable */ |
| 599 | #define CICR_SPS ((uint)0x00000001) /* SCC Spread */ | 599 | #define CICR_SPS ((uint)0x00000001) /* SCC Spread */ |
| 600 | 600 | ||
| 601 | #define IMAP_ADDR (get_immrbase()) | ||
| 602 | |||
| 603 | #define CPM_PIN_INPUT 0 | 601 | #define CPM_PIN_INPUT 0 |
| 604 | #define CPM_PIN_OUTPUT 1 | 602 | #define CPM_PIN_OUTPUT 1 |
| 605 | #define CPM_PIN_PRIMARY 0 | 603 | #define CPM_PIN_PRIMARY 0 |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 3d9e887c3c0c..b44aaabdd1a6 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
| @@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev, | |||
| 309 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 309 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); |
| 310 | 310 | ||
| 311 | BUG_ON(!dma_ops); | 311 | BUG_ON(!dma_ops); |
| 312 | dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, | 312 | |
| 313 | if (dma_ops->sync_single_range_for_cpu) | ||
| 314 | dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, | ||
| 313 | size, direction); | 315 | size, direction); |
| 314 | } | 316 | } |
| 315 | 317 | ||
| @@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev, | |||
| 320 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 322 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); |
| 321 | 323 | ||
| 322 | BUG_ON(!dma_ops); | 324 | BUG_ON(!dma_ops); |
| 323 | dma_ops->sync_single_range_for_device(dev, dma_handle, | 325 | |
| 326 | if (dma_ops->sync_single_range_for_device) | ||
| 327 | dma_ops->sync_single_range_for_device(dev, dma_handle, | ||
| 324 | 0, size, direction); | 328 | 0, size, direction); |
| 325 | } | 329 | } |
| 326 | 330 | ||
| @@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, | |||
| 331 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 335 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); |
| 332 | 336 | ||
| 333 | BUG_ON(!dma_ops); | 337 | BUG_ON(!dma_ops); |
| 334 | dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction); | 338 | |
| 339 | if (dma_ops->sync_sg_for_cpu) | ||
| 340 | dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction); | ||
| 335 | } | 341 | } |
| 336 | 342 | ||
| 337 | static inline void dma_sync_sg_for_device(struct device *dev, | 343 | static inline void dma_sync_sg_for_device(struct device *dev, |
| @@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev, | |||
| 341 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 347 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); |
| 342 | 348 | ||
| 343 | BUG_ON(!dma_ops); | 349 | BUG_ON(!dma_ops); |
| 344 | dma_ops->sync_sg_for_device(dev, sgl, nents, direction); | 350 | |
| 351 | if (dma_ops->sync_sg_for_device) | ||
| 352 | dma_ops->sync_sg_for_device(dev, sgl, nents, direction); | ||
| 345 | } | 353 | } |
| 346 | 354 | ||
| 347 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | 355 | static inline void dma_sync_single_range_for_cpu(struct device *dev, |
| @@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, | |||
| 351 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 359 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); |
| 352 | 360 | ||
| 353 | BUG_ON(!dma_ops); | 361 | BUG_ON(!dma_ops); |
| 354 | dma_ops->sync_single_range_for_cpu(dev, dma_handle, | 362 | |
| 363 | if (dma_ops->sync_single_range_for_cpu) | ||
| 364 | dma_ops->sync_single_range_for_cpu(dev, dma_handle, | ||
| 355 | offset, size, direction); | 365 | offset, size, direction); |
| 356 | } | 366 | } |
| 357 | 367 | ||
| @@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev, | |||
| 362 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); | 372 | struct dma_mapping_ops *dma_ops = get_dma_ops(dev); |
| 363 | 373 | ||
| 364 | BUG_ON(!dma_ops); | 374 | BUG_ON(!dma_ops); |
| 365 | dma_ops->sync_single_range_for_device(dev, dma_handle, offset, | 375 | |
| 376 | if (dma_ops->sync_single_range_for_device) | ||
| 377 | dma_ops->sync_single_range_for_device(dev, dma_handle, offset, | ||
| 366 | size, direction); | 378 | size, direction); |
| 367 | } | 379 | } |
| 368 | #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */ | 380 | #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */ |
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index 684a73f4324f..a74c4ee6c020 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h | |||
| @@ -22,9 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
| 24 | 24 | ||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
| 27 | #include <linux/highmem.h> | ||
| 28 | #include <asm/kmap_types.h> | 26 | #include <asm/kmap_types.h> |
| 29 | #include <asm/tlbflush.h> | 27 | #include <asm/tlbflush.h> |
| 30 | #include <asm/page.h> | 28 | #include <asm/page.h> |
| @@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table; | |||
| 62 | 60 | ||
| 63 | extern void *kmap_high(struct page *page); | 61 | extern void *kmap_high(struct page *page); |
| 64 | extern void kunmap_high(struct page *page); | 62 | extern void kunmap_high(struct page *page); |
| 63 | extern void *kmap_atomic_prot(struct page *page, enum km_type type, | ||
| 64 | pgprot_t prot); | ||
| 65 | extern void kunmap_atomic(void *kvaddr, enum km_type type); | ||
| 65 | 66 | ||
| 66 | static inline void *kmap(struct page *page) | 67 | static inline void *kmap(struct page *page) |
| 67 | { | 68 | { |
| @@ -79,62 +80,11 @@ static inline void kunmap(struct page *page) | |||
| 79 | kunmap_high(page); | 80 | kunmap_high(page); |
| 80 | } | 81 | } |
| 81 | 82 | ||
| 82 | /* | ||
| 83 | * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap | ||
| 84 | * gives a more generic (and caching) interface. But kmap_atomic can | ||
| 85 | * be used in IRQ contexts, so in some (very limited) cases we need | ||
| 86 | * it. | ||
| 87 | */ | ||
| 88 | static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | ||
| 89 | { | ||
| 90 | unsigned int idx; | ||
| 91 | unsigned long vaddr; | ||
| 92 | |||
| 93 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | ||
| 94 | pagefault_disable(); | ||
| 95 | if (!PageHighMem(page)) | ||
| 96 | return page_address(page); | ||
| 97 | |||
| 98 | debug_kmap_atomic(type); | ||
| 99 | idx = type + KM_TYPE_NR*smp_processor_id(); | ||
| 100 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
| 101 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 102 | BUG_ON(!pte_none(*(kmap_pte-idx))); | ||
| 103 | #endif | ||
| 104 | __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); | ||
| 105 | local_flush_tlb_page(NULL, vaddr); | ||
| 106 | |||
| 107 | return (void*) vaddr; | ||
| 108 | } | ||
| 109 | |||
| 110 | static inline void *kmap_atomic(struct page *page, enum km_type type) | 83 | static inline void *kmap_atomic(struct page *page, enum km_type type) |
| 111 | { | 84 | { |
| 112 | return kmap_atomic_prot(page, type, kmap_prot); | 85 | return kmap_atomic_prot(page, type, kmap_prot); |
| 113 | } | 86 | } |
| 114 | 87 | ||
| 115 | static inline void kunmap_atomic(void *kvaddr, enum km_type type) | ||
| 116 | { | ||
| 117 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 118 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | ||
| 119 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | ||
| 120 | |||
| 121 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { | ||
| 122 | pagefault_enable(); | ||
| 123 | return; | ||
| 124 | } | ||
| 125 | |||
| 126 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | ||
| 127 | |||
| 128 | /* | ||
| 129 | * force other mappings to Oops if they'll try to access | ||
| 130 | * this pte without first remap it | ||
| 131 | */ | ||
| 132 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | ||
| 133 | local_flush_tlb_page(NULL, vaddr); | ||
| 134 | #endif | ||
| 135 | pagefault_enable(); | ||
| 136 | } | ||
| 137 | |||
| 138 | static inline struct page *kmap_atomic_to_page(void *ptr) | 88 | static inline struct page *kmap_atomic_to_page(void *ptr) |
| 139 | { | 89 | { |
| 140 | unsigned long idx, vaddr = (unsigned long) ptr; | 90 | unsigned long idx, vaddr = (unsigned long) ptr; |
| @@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr) | |||
| 148 | return pte_page(*pte); | 98 | return pte_page(*pte); |
| 149 | } | 99 | } |
| 150 | 100 | ||
| 101 | |||
| 151 | #define flush_cache_kmaps() flush_cache_all() | 102 | #define flush_cache_kmaps() flush_cache_all() |
| 152 | 103 | ||
| 153 | #endif /* __KERNEL__ */ | 104 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 867ab8ed69b3..8b505eaaa38a 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h | |||
| @@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags) | |||
| 68 | 68 | ||
| 69 | #if defined(CONFIG_BOOKE) | 69 | #if defined(CONFIG_BOOKE) |
| 70 | #define SET_MSR_EE(x) mtmsr(x) | 70 | #define SET_MSR_EE(x) mtmsr(x) |
| 71 | #define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") | 71 | #define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") |
| 72 | #else | 72 | #else |
| 73 | #define SET_MSR_EE(x) mtmsr(x) | 73 | #define SET_MSR_EE(x) mtmsr(x) |
| 74 | #define local_irq_restore(flags) mtmsr(flags) | 74 | #define raw_local_irq_restore(flags) mtmsr(flags) |
| 75 | #endif | 75 | #endif |
| 76 | 76 | ||
| 77 | static inline void local_irq_disable(void) | 77 | static inline void raw_local_irq_disable(void) |
| 78 | { | 78 | { |
| 79 | #ifdef CONFIG_BOOKE | 79 | #ifdef CONFIG_BOOKE |
| 80 | __asm__ __volatile__("wrteei 0": : :"memory"); | 80 | __asm__ __volatile__("wrteei 0": : :"memory"); |
| @@ -86,7 +86,7 @@ static inline void local_irq_disable(void) | |||
| 86 | #endif | 86 | #endif |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static inline void local_irq_enable(void) | 89 | static inline void raw_local_irq_enable(void) |
| 90 | { | 90 | { |
| 91 | #ifdef CONFIG_BOOKE | 91 | #ifdef CONFIG_BOOKE |
| 92 | __asm__ __volatile__("wrteei 1": : :"memory"); | 92 | __asm__ __volatile__("wrteei 1": : :"memory"); |
| @@ -98,7 +98,7 @@ static inline void local_irq_enable(void) | |||
| 98 | #endif | 98 | #endif |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static inline void local_irq_save_ptr(unsigned long *flags) | 101 | static inline void raw_local_irq_save_ptr(unsigned long *flags) |
| 102 | { | 102 | { |
| 103 | unsigned long msr; | 103 | unsigned long msr; |
| 104 | msr = mfmsr(); | 104 | msr = mfmsr(); |
| @@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags) | |||
| 110 | #endif | 110 | #endif |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | #define local_save_flags(flags) ((flags) = mfmsr()) | 113 | #define raw_local_save_flags(flags) ((flags) = mfmsr()) |
| 114 | #define local_irq_save(flags) local_irq_save_ptr(&flags) | 114 | #define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) |
| 115 | #define irqs_disabled() ((mfmsr() & MSR_EE) == 0) | 115 | #define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) |
| 116 | #define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0) | ||
| 116 | 117 | ||
| 117 | #define hard_irq_enable() local_irq_enable() | 118 | #define hard_irq_disable() raw_local_irq_disable() |
| 118 | #define hard_irq_disable() local_irq_disable() | ||
| 119 | 119 | ||
| 120 | static inline int irqs_disabled_flags(unsigned long flags) | 120 | static inline int irqs_disabled_flags(unsigned long flags) |
| 121 | { | 121 | { |
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index e05d26fa372f..82b72207c51c 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h | |||
| @@ -47,7 +47,8 @@ | |||
| 47 | * generic accessors and iterators here | 47 | * generic accessors and iterators here |
| 48 | */ | 48 | */ |
| 49 | #define __real_pte(e,p) ((real_pte_t) { \ | 49 | #define __real_pte(e,p) ((real_pte_t) { \ |
| 50 | (e), pte_val(*((p) + PTRS_PER_PTE)) }) | 50 | (e), ((e) & _PAGE_COMBO) ? \ |
| 51 | (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) | ||
| 51 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ | 52 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ |
| 52 | (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) | 53 | (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) |
| 53 | #define __rpte_to_pte(r) ((r).pte) | 54 | #define __rpte_to_pte(r) ((r).pte) |
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 01c12339b304..168fce726201 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h | |||
| @@ -58,7 +58,7 @@ struct rtas_t { | |||
| 58 | unsigned long entry; /* physical address pointer */ | 58 | unsigned long entry; /* physical address pointer */ |
| 59 | unsigned long base; /* physical address pointer */ | 59 | unsigned long base; /* physical address pointer */ |
| 60 | unsigned long size; | 60 | unsigned long size; |
| 61 | spinlock_t lock; | 61 | raw_spinlock_t lock; |
| 62 | struct rtas_args args; | 62 | struct rtas_args args; |
| 63 | struct device_node *dev; /* virtual address pointer */ | 63 | struct device_node *dev; /* virtual address pointer */ |
| 64 | }; | 64 | }; |
| @@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg) | |||
| 245 | (devfn << 8) | (reg & 0xff); | 245 | (devfn << 8) | (reg & 0xff); |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | extern void __cpuinit rtas_give_timebase(void); | ||
| 249 | extern void __cpuinit rtas_take_timebase(void); | ||
| 250 | |||
| 248 | #endif /* __KERNEL__ */ | 251 | #endif /* __KERNEL__ */ |
| 249 | #endif /* _POWERPC_RTAS_H */ | 252 | #endif /* _POWERPC_RTAS_H */ |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 4dd38f129153..3cadba60a4b6 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
| @@ -191,11 +191,49 @@ transfer_to_handler_cont: | |||
| 191 | mflr r9 | 191 | mflr r9 |
| 192 | lwz r11,0(r9) /* virtual address of handler */ | 192 | lwz r11,0(r9) /* virtual address of handler */ |
| 193 | lwz r9,4(r9) /* where to go when done */ | 193 | lwz r9,4(r9) /* where to go when done */ |
| 194 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 195 | lis r12,reenable_mmu@h | ||
| 196 | ori r12,r12,reenable_mmu@l | ||
| 197 | mtspr SPRN_SRR0,r12 | ||
| 198 | mtspr SPRN_SRR1,r10 | ||
| 199 | SYNC | ||
| 200 | RFI | ||
| 201 | reenable_mmu: /* re-enable mmu so we can */ | ||
| 202 | mfmsr r10 | ||
| 203 | lwz r12,_MSR(r1) | ||
| 204 | xor r10,r10,r12 | ||
| 205 | andi. r10,r10,MSR_EE /* Did EE change? */ | ||
| 206 | beq 1f | ||
| 207 | |||
| 208 | /* Save handler and return address into the 2 unused words | ||
| 209 | * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything | ||
| 210 | * else can be recovered from the pt_regs except r3 which for | ||
| 211 | * normal interrupts has been set to pt_regs and for syscalls | ||
| 212 | * is an argument, so we temporarily use ORIG_GPR3 to save it | ||
| 213 | */ | ||
| 214 | stw r9,8(r1) | ||
| 215 | stw r11,12(r1) | ||
| 216 | stw r3,ORIG_GPR3(r1) | ||
| 217 | bl trace_hardirqs_off | ||
| 218 | lwz r0,GPR0(r1) | ||
| 219 | lwz r3,ORIG_GPR3(r1) | ||
| 220 | lwz r4,GPR4(r1) | ||
| 221 | lwz r5,GPR5(r1) | ||
| 222 | lwz r6,GPR6(r1) | ||
| 223 | lwz r7,GPR7(r1) | ||
| 224 | lwz r8,GPR8(r1) | ||
| 225 | lwz r9,8(r1) | ||
| 226 | lwz r11,12(r1) | ||
| 227 | 1: mtctr r11 | ||
| 228 | mtlr r9 | ||
| 229 | bctr /* jump to handler */ | ||
| 230 | #else /* CONFIG_TRACE_IRQFLAGS */ | ||
| 194 | mtspr SPRN_SRR0,r11 | 231 | mtspr SPRN_SRR0,r11 |
| 195 | mtspr SPRN_SRR1,r10 | 232 | mtspr SPRN_SRR1,r10 |
| 196 | mtlr r9 | 233 | mtlr r9 |
| 197 | SYNC | 234 | SYNC |
| 198 | RFI /* jump to handler, enable MMU */ | 235 | RFI /* jump to handler, enable MMU */ |
| 236 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
| 199 | 237 | ||
| 200 | #if defined (CONFIG_6xx) || defined(CONFIG_E500) | 238 | #if defined (CONFIG_6xx) || defined(CONFIG_E500) |
| 201 | 4: rlwinm r12,r12,0,~_TLF_NAPPING | 239 | 4: rlwinm r12,r12,0,~_TLF_NAPPING |
| @@ -251,6 +289,31 @@ _GLOBAL(DoSyscall) | |||
| 251 | #ifdef SHOW_SYSCALLS | 289 | #ifdef SHOW_SYSCALLS |
| 252 | bl do_show_syscall | 290 | bl do_show_syscall |
| 253 | #endif /* SHOW_SYSCALLS */ | 291 | #endif /* SHOW_SYSCALLS */ |
| 292 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 293 | /* Return from syscalls can (and generally will) hard enable | ||
| 294 | * interrupts. You aren't supposed to call a syscall with | ||
| 295 | * interrupts disabled in the first place. However, to ensure | ||
| 296 | * that we get it right vs. lockdep if it happens, we force | ||
| 297 | * that hard enable here with appropriate tracing if we see | ||
| 298 | * that we have been called with interrupts off | ||
| 299 | */ | ||
| 300 | mfmsr r11 | ||
| 301 | andi. r12,r11,MSR_EE | ||
| 302 | bne+ 1f | ||
| 303 | /* We came in with interrupts disabled, we enable them now */ | ||
| 304 | bl trace_hardirqs_on | ||
| 305 | mfmsr r11 | ||
| 306 | lwz r0,GPR0(r1) | ||
| 307 | lwz r3,GPR3(r1) | ||
| 308 | lwz r4,GPR4(r1) | ||
| 309 | ori r11,r11,MSR_EE | ||
| 310 | lwz r5,GPR5(r1) | ||
| 311 | lwz r6,GPR6(r1) | ||
| 312 | lwz r7,GPR7(r1) | ||
| 313 | lwz r8,GPR8(r1) | ||
| 314 | mtmsr r11 | ||
| 315 | 1: | ||
| 316 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
| 254 | rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | 317 | rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ |
| 255 | lwz r11,TI_FLAGS(r10) | 318 | lwz r11,TI_FLAGS(r10) |
| 256 | andi. r11,r11,_TIF_SYSCALL_T_OR_A | 319 | andi. r11,r11,_TIF_SYSCALL_T_OR_A |
| @@ -275,6 +338,7 @@ ret_from_syscall: | |||
| 275 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | 338 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ |
| 276 | /* disable interrupts so current_thread_info()->flags can't change */ | 339 | /* disable interrupts so current_thread_info()->flags can't change */ |
| 277 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ | 340 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ |
| 341 | /* Note: We don't bother telling lockdep about it */ | ||
| 278 | SYNC | 342 | SYNC |
| 279 | MTMSRD(r10) | 343 | MTMSRD(r10) |
| 280 | lwz r9,TI_FLAGS(r12) | 344 | lwz r9,TI_FLAGS(r12) |
| @@ -288,6 +352,19 @@ ret_from_syscall: | |||
| 288 | oris r11,r11,0x1000 /* Set SO bit in CR */ | 352 | oris r11,r11,0x1000 /* Set SO bit in CR */ |
| 289 | stw r11,_CCR(r1) | 353 | stw r11,_CCR(r1) |
| 290 | syscall_exit_cont: | 354 | syscall_exit_cont: |
| 355 | lwz r8,_MSR(r1) | ||
| 356 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 357 | /* If we are going to return from the syscall with interrupts | ||
| 358 | * off, we trace that here. It shouldn't happen though but we | ||
| 359 | * want to catch the bugger if it does right ? | ||
| 360 | */ | ||
| 361 | andi. r10,r8,MSR_EE | ||
| 362 | bne+ 1f | ||
| 363 | stw r3,GPR3(r1) | ||
| 364 | bl trace_hardirqs_off | ||
| 365 | lwz r3,GPR3(r1) | ||
| 366 | 1: | ||
| 367 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
| 291 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 368 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) |
| 292 | /* If the process has its own DBCR0 value, load it up. The internal | 369 | /* If the process has its own DBCR0 value, load it up. The internal |
| 293 | debug mode bit tells us that dbcr0 should be loaded. */ | 370 | debug mode bit tells us that dbcr0 should be loaded. */ |
| @@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) | |||
| 311 | mtlr r4 | 388 | mtlr r4 |
| 312 | mtcr r5 | 389 | mtcr r5 |
| 313 | lwz r7,_NIP(r1) | 390 | lwz r7,_NIP(r1) |
| 314 | lwz r8,_MSR(r1) | ||
| 315 | FIX_SRR1(r8, r0) | 391 | FIX_SRR1(r8, r0) |
| 316 | lwz r2,GPR2(r1) | 392 | lwz r2,GPR2(r1) |
| 317 | lwz r1,GPR1(r1) | 393 | lwz r1,GPR1(r1) |
| @@ -394,7 +470,9 @@ syscall_exit_work: | |||
| 394 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | 470 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) |
| 395 | beq ret_from_except | 471 | beq ret_from_except |
| 396 | 472 | ||
| 397 | /* Re-enable interrupts */ | 473 | /* Re-enable interrupts. There is no need to trace that with |
| 474 | * lockdep as we are supposed to have IRQs on at this point | ||
| 475 | */ | ||
| 398 | ori r10,r10,MSR_EE | 476 | ori r10,r10,MSR_EE |
| 399 | SYNC | 477 | SYNC |
| 400 | MTMSRD(r10) | 478 | MTMSRD(r10) |
| @@ -705,6 +783,7 @@ ret_from_except: | |||
| 705 | /* Hard-disable interrupts so that current_thread_info()->flags | 783 | /* Hard-disable interrupts so that current_thread_info()->flags |
| 706 | * can't change between when we test it and when we return | 784 | * can't change between when we test it and when we return |
| 707 | * from the interrupt. */ | 785 | * from the interrupt. */ |
| 786 | /* Note: We don't bother telling lockdep about it */ | ||
| 708 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) | 787 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) |
| 709 | SYNC /* Some chip revs have problems here... */ | 788 | SYNC /* Some chip revs have problems here... */ |
| 710 | MTMSRD(r10) /* disable interrupts */ | 789 | MTMSRD(r10) /* disable interrupts */ |
| @@ -744,11 +823,24 @@ resume_kernel: | |||
| 744 | beq+ restore | 823 | beq+ restore |
| 745 | andi. r0,r3,MSR_EE /* interrupts off? */ | 824 | andi. r0,r3,MSR_EE /* interrupts off? */ |
| 746 | beq restore /* don't schedule if so */ | 825 | beq restore /* don't schedule if so */ |
| 826 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 827 | /* Lockdep thinks irqs are enabled, we need to call | ||
| 828 | * preempt_schedule_irq with IRQs off, so we inform lockdep | ||
| 829 | * now that we -did- turn them off already | ||
| 830 | */ | ||
| 831 | bl trace_hardirqs_off | ||
| 832 | #endif | ||
| 747 | 1: bl preempt_schedule_irq | 833 | 1: bl preempt_schedule_irq |
| 748 | rlwinm r9,r1,0,0,(31-THREAD_SHIFT) | 834 | rlwinm r9,r1,0,0,(31-THREAD_SHIFT) |
| 749 | lwz r3,TI_FLAGS(r9) | 835 | lwz r3,TI_FLAGS(r9) |
| 750 | andi. r0,r3,_TIF_NEED_RESCHED | 836 | andi. r0,r3,_TIF_NEED_RESCHED |
| 751 | bne- 1b | 837 | bne- 1b |
| 838 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 839 | /* And now, to properly rebalance the above, we tell lockdep they | ||
| 840 | * are being turned back on, which will happen when we return | ||
| 841 | */ | ||
| 842 | bl trace_hardirqs_on | ||
| 843 | #endif | ||
| 752 | #else | 844 | #else |
| 753 | resume_kernel: | 845 | resume_kernel: |
| 754 | #endif /* CONFIG_PREEMPT */ | 846 | #endif /* CONFIG_PREEMPT */ |
| @@ -765,6 +857,28 @@ restore: | |||
| 765 | stw r6,icache_44x_need_flush@l(r4) | 857 | stw r6,icache_44x_need_flush@l(r4) |
| 766 | 1: | 858 | 1: |
| 767 | #endif /* CONFIG_44x */ | 859 | #endif /* CONFIG_44x */ |
| 860 | |||
| 861 | lwz r9,_MSR(r1) | ||
| 862 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 863 | /* Lockdep doesn't know about the fact that IRQs are temporarily turned | ||
| 864 | * off in this assembly code while peeking at TI_FLAGS() and such. However | ||
| 865 | * we need to inform it if the exception turned interrupts off, and we | ||
| 866 | * are about to trun them back on. | ||
| 867 | * | ||
| 868 | * The problem here sadly is that we don't know whether the exceptions was | ||
| 869 | * one that turned interrupts off or not. So we always tell lockdep about | ||
| 870 | * turning them on here when we go back to wherever we came from with EE | ||
| 871 | * on, even if that may meen some redudant calls being tracked. Maybe later | ||
| 872 | * we could encode what the exception did somewhere or test the exception | ||
| 873 | * type in the pt_regs but that sounds overkill | ||
| 874 | */ | ||
| 875 | andi. r10,r9,MSR_EE | ||
| 876 | beq 1f | ||
| 877 | bl trace_hardirqs_on | ||
| 878 | lwz r9,_MSR(r1) | ||
| 879 | 1: | ||
| 880 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
| 881 | |||
| 768 | lwz r0,GPR0(r1) | 882 | lwz r0,GPR0(r1) |
| 769 | lwz r2,GPR2(r1) | 883 | lwz r2,GPR2(r1) |
| 770 | REST_4GPRS(3, r1) | 884 | REST_4GPRS(3, r1) |
| @@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) | |||
| 782 | stwcx. r0,0,r1 /* to clear the reservation */ | 896 | stwcx. r0,0,r1 /* to clear the reservation */ |
| 783 | 897 | ||
| 784 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | 898 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) |
| 785 | lwz r9,_MSR(r1) | ||
| 786 | andi. r10,r9,MSR_RI /* check if this exception occurred */ | 899 | andi. r10,r9,MSR_RI /* check if this exception occurred */ |
| 787 | beql nonrecoverable /* at a bad place (MSR:RI = 0) */ | 900 | beql nonrecoverable /* at a bad place (MSR:RI = 0) */ |
| 788 | 901 | ||
| @@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) | |||
| 805 | MTMSRD(r10) /* clear the RI bit */ | 918 | MTMSRD(r10) /* clear the RI bit */ |
| 806 | .globl exc_exit_restart | 919 | .globl exc_exit_restart |
| 807 | exc_exit_restart: | 920 | exc_exit_restart: |
| 808 | lwz r9,_MSR(r1) | ||
| 809 | lwz r12,_NIP(r1) | 921 | lwz r12,_NIP(r1) |
| 810 | FIX_SRR1(r9,r10) | 922 | FIX_SRR1(r9,r10) |
| 811 | mtspr SPRN_SRR0,r12 | 923 | mtspr SPRN_SRR0,r12 |
| @@ -1035,11 +1147,18 @@ do_work: /* r10 contains MSR_KERNEL here */ | |||
| 1035 | beq do_user_signal | 1147 | beq do_user_signal |
| 1036 | 1148 | ||
| 1037 | do_resched: /* r10 contains MSR_KERNEL here */ | 1149 | do_resched: /* r10 contains MSR_KERNEL here */ |
| 1150 | /* Note: We don't need to inform lockdep that we are enabling | ||
| 1151 | * interrupts here. As far as it knows, they are already enabled | ||
| 1152 | */ | ||
| 1038 | ori r10,r10,MSR_EE | 1153 | ori r10,r10,MSR_EE |
| 1039 | SYNC | 1154 | SYNC |
| 1040 | MTMSRD(r10) /* hard-enable interrupts */ | 1155 | MTMSRD(r10) /* hard-enable interrupts */ |
| 1041 | bl schedule | 1156 | bl schedule |
| 1042 | recheck: | 1157 | recheck: |
| 1158 | /* Note: And we don't tell it we are disabling them again | ||
| 1159 | * neither. Those disable/enable cycles used to peek at | ||
| 1160 | * TI_FLAGS aren't advertised. | ||
| 1161 | */ | ||
| 1043 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) | 1162 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) |
| 1044 | SYNC | 1163 | SYNC |
| 1045 | MTMSRD(r10) /* disable interrupts */ | 1164 | MTMSRD(r10) /* disable interrupts */ |
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 48469463f89e..fc2132942754 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S | |||
| @@ -1124,9 +1124,8 @@ mmu_off: | |||
| 1124 | RFI | 1124 | RFI |
| 1125 | 1125 | ||
| 1126 | /* | 1126 | /* |
| 1127 | * Use the first pair of BAT registers to map the 1st 16MB | 1127 | * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET |
| 1128 | * of RAM to PAGE_OFFSET. From this point on we can't safely | 1128 | * (we keep one for debugging) and on others, we use one 256M BAT. |
| 1129 | * call OF any more. | ||
| 1130 | */ | 1129 | */ |
| 1131 | initial_bats: | 1130 | initial_bats: |
| 1132 | lis r11,PAGE_OFFSET@h | 1131 | lis r11,PAGE_OFFSET@h |
| @@ -1136,12 +1135,16 @@ initial_bats: | |||
| 1136 | bne 4f | 1135 | bne 4f |
| 1137 | ori r11,r11,4 /* set up BAT registers for 601 */ | 1136 | ori r11,r11,4 /* set up BAT registers for 601 */ |
| 1138 | li r8,0x7f /* valid, block length = 8MB */ | 1137 | li r8,0x7f /* valid, block length = 8MB */ |
| 1139 | oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ | ||
| 1140 | oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ | ||
| 1141 | mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ | 1138 | mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */ |
| 1142 | mtspr SPRN_IBAT0L,r8 /* lower BAT register */ | 1139 | mtspr SPRN_IBAT0L,r8 /* lower BAT register */ |
| 1143 | mtspr SPRN_IBAT1U,r9 | 1140 | addis r11,r11,0x800000@h |
| 1144 | mtspr SPRN_IBAT1L,r10 | 1141 | addis r8,r8,0x800000@h |
| 1142 | mtspr SPRN_IBAT1U,r11 | ||
| 1143 | mtspr SPRN_IBAT1L,r8 | ||
| 1144 | addis r11,r11,0x800000@h | ||
| 1145 | addis r8,r8,0x800000@h | ||
| 1146 | mtspr SPRN_IBAT2U,r11 | ||
| 1147 | mtspr SPRN_IBAT2L,r8 | ||
| 1145 | isync | 1148 | isync |
| 1146 | blr | 1149 | blr |
| 1147 | 1150 | ||
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c index fa983a59c4ce..a359cb08e900 100644 --- a/arch/powerpc/kernel/of_device.c +++ b/arch/powerpc/kernel/of_device.c | |||
| @@ -76,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np, | |||
| 76 | dev->dev.archdata.of_node = np; | 76 | dev->dev.archdata.of_node = np; |
| 77 | 77 | ||
| 78 | if (bus_id) | 78 | if (bus_id) |
| 79 | dev_set_name(&dev->dev, bus_id); | 79 | dev_set_name(&dev->dev, "%s", bus_id); |
| 80 | else | 80 | else |
| 81 | of_device_make_bus_id(dev); | 81 | of_device_make_bus_id(dev); |
| 82 | 82 | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3e7135bbe40f..892a9f2e6d76 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -528,7 +528,7 @@ void show_regs(struct pt_regs * regs) | |||
| 528 | 528 | ||
| 529 | for (i = 0; i < 32; i++) { | 529 | for (i = 0; i < 32; i++) { |
| 530 | if ((i % REGS_PER_LINE) == 0) | 530 | if ((i % REGS_PER_LINE) == 0) |
| 531 | printk("\n" KERN_INFO "GPR%02d: ", i); | 531 | printk("\nGPR%02d: ", i); |
| 532 | printk(REG " ", regs->gpr[i]); | 532 | printk(REG " ", regs->gpr[i]); |
| 533 | if (i == LAST_VOLATILE && !FULL_REGS(regs)) | 533 | if (i == LAST_VOLATILE && !FULL_REGS(regs)) |
| 534 | break; | 534 | break; |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index ee4c7609b649..c434823b8c83 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
| @@ -38,9 +38,10 @@ | |||
| 38 | #include <asm/syscalls.h> | 38 | #include <asm/syscalls.h> |
| 39 | #include <asm/smp.h> | 39 | #include <asm/smp.h> |
| 40 | #include <asm/atomic.h> | 40 | #include <asm/atomic.h> |
| 41 | #include <asm/time.h> | ||
| 41 | 42 | ||
| 42 | struct rtas_t rtas = { | 43 | struct rtas_t rtas = { |
| 43 | .lock = SPIN_LOCK_UNLOCKED | 44 | .lock = __RAW_SPIN_LOCK_UNLOCKED |
| 44 | }; | 45 | }; |
| 45 | EXPORT_SYMBOL(rtas); | 46 | EXPORT_SYMBOL(rtas); |
| 46 | 47 | ||
| @@ -67,6 +68,28 @@ unsigned long rtas_rmo_buf; | |||
| 67 | void (*rtas_flash_term_hook)(int); | 68 | void (*rtas_flash_term_hook)(int); |
| 68 | EXPORT_SYMBOL(rtas_flash_term_hook); | 69 | EXPORT_SYMBOL(rtas_flash_term_hook); |
| 69 | 70 | ||
| 71 | /* RTAS uses home-made raw locking instead of spin_lock_irqsave | ||
| 72 | * because those can be called from within really nasty contexts | ||
| 73 | * such as having the timebase stopped which would lockup with | ||
| 74 | * normal locks and spinlock debugging enabled | ||
| 75 | */ | ||
| 76 | static unsigned long lock_rtas(void) | ||
| 77 | { | ||
| 78 | unsigned long flags; | ||
| 79 | |||
| 80 | local_irq_save(flags); | ||
| 81 | preempt_disable(); | ||
| 82 | __raw_spin_lock_flags(&rtas.lock, flags); | ||
| 83 | return flags; | ||
| 84 | } | ||
| 85 | |||
| 86 | static void unlock_rtas(unsigned long flags) | ||
| 87 | { | ||
| 88 | __raw_spin_unlock(&rtas.lock); | ||
| 89 | local_irq_restore(flags); | ||
| 90 | preempt_enable(); | ||
| 91 | } | ||
| 92 | |||
| 70 | /* | 93 | /* |
| 71 | * call_rtas_display_status and call_rtas_display_status_delay | 94 | * call_rtas_display_status and call_rtas_display_status_delay |
| 72 | * are designed only for very early low-level debugging, which | 95 | * are designed only for very early low-level debugging, which |
| @@ -79,7 +102,7 @@ static void call_rtas_display_status(char c) | |||
| 79 | 102 | ||
| 80 | if (!rtas.base) | 103 | if (!rtas.base) |
| 81 | return; | 104 | return; |
| 82 | spin_lock_irqsave(&rtas.lock, s); | 105 | s = lock_rtas(); |
| 83 | 106 | ||
| 84 | args->token = 10; | 107 | args->token = 10; |
| 85 | args->nargs = 1; | 108 | args->nargs = 1; |
| @@ -89,7 +112,7 @@ static void call_rtas_display_status(char c) | |||
| 89 | 112 | ||
| 90 | enter_rtas(__pa(args)); | 113 | enter_rtas(__pa(args)); |
| 91 | 114 | ||
| 92 | spin_unlock_irqrestore(&rtas.lock, s); | 115 | unlock_rtas(s); |
| 93 | } | 116 | } |
| 94 | 117 | ||
| 95 | static void call_rtas_display_status_delay(char c) | 118 | static void call_rtas_display_status_delay(char c) |
| @@ -411,8 +434,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) | |||
| 411 | if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) | 434 | if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) |
| 412 | return -1; | 435 | return -1; |
| 413 | 436 | ||
| 414 | /* Gotta do something different here, use global lock for now... */ | 437 | s = lock_rtas(); |
| 415 | spin_lock_irqsave(&rtas.lock, s); | ||
| 416 | rtas_args = &rtas.args; | 438 | rtas_args = &rtas.args; |
| 417 | 439 | ||
| 418 | rtas_args->token = token; | 440 | rtas_args->token = token; |
| @@ -439,8 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) | |||
| 439 | outputs[i] = rtas_args->rets[i+1]; | 461 | outputs[i] = rtas_args->rets[i+1]; |
| 440 | ret = (nret > 0)? rtas_args->rets[0]: 0; | 462 | ret = (nret > 0)? rtas_args->rets[0]: 0; |
| 441 | 463 | ||
| 442 | /* Gotta do something different here, use global lock for now... */ | 464 | unlock_rtas(s); |
| 443 | spin_unlock_irqrestore(&rtas.lock, s); | ||
| 444 | 465 | ||
| 445 | if (buff_copy) { | 466 | if (buff_copy) { |
| 446 | log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); | 467 | log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); |
| @@ -837,7 +858,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) | |||
| 837 | 858 | ||
| 838 | buff_copy = get_errorlog_buffer(); | 859 | buff_copy = get_errorlog_buffer(); |
| 839 | 860 | ||
| 840 | spin_lock_irqsave(&rtas.lock, flags); | 861 | flags = lock_rtas(); |
| 841 | 862 | ||
| 842 | rtas.args = args; | 863 | rtas.args = args; |
| 843 | enter_rtas(__pa(&rtas.args)); | 864 | enter_rtas(__pa(&rtas.args)); |
| @@ -848,7 +869,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) | |||
| 848 | if (args.rets[0] == -1) | 869 | if (args.rets[0] == -1) |
| 849 | errbuf = __fetch_rtas_last_error(buff_copy); | 870 | errbuf = __fetch_rtas_last_error(buff_copy); |
| 850 | 871 | ||
| 851 | spin_unlock_irqrestore(&rtas.lock, flags); | 872 | unlock_rtas(flags); |
| 852 | 873 | ||
| 853 | if (buff_copy) { | 874 | if (buff_copy) { |
| 854 | if (errbuf) | 875 | if (errbuf) |
| @@ -951,3 +972,33 @@ int __init early_init_dt_scan_rtas(unsigned long node, | |||
| 951 | /* break now */ | 972 | /* break now */ |
| 952 | return 1; | 973 | return 1; |
| 953 | } | 974 | } |
| 975 | |||
| 976 | static raw_spinlock_t timebase_lock; | ||
| 977 | static u64 timebase = 0; | ||
| 978 | |||
| 979 | void __cpuinit rtas_give_timebase(void) | ||
| 980 | { | ||
| 981 | unsigned long flags; | ||
| 982 | |||
| 983 | local_irq_save(flags); | ||
| 984 | hard_irq_disable(); | ||
| 985 | __raw_spin_lock(&timebase_lock); | ||
| 986 | rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); | ||
| 987 | timebase = get_tb(); | ||
| 988 | __raw_spin_unlock(&timebase_lock); | ||
| 989 | |||
| 990 | while (timebase) | ||
| 991 | barrier(); | ||
| 992 | rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL); | ||
| 993 | local_irq_restore(flags); | ||
| 994 | } | ||
| 995 | |||
| 996 | void __cpuinit rtas_take_timebase(void) | ||
| 997 | { | ||
| 998 | while (!timebase) | ||
| 999 | barrier(); | ||
| 1000 | __raw_spin_lock(&timebase_lock); | ||
| 1001 | set_tb(timebase >> 32, timebase & 0xffffffff); | ||
| 1002 | timebase = 0; | ||
| 1003 | __raw_spin_unlock(&timebase_lock); | ||
| 1004 | } | ||
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 1d154248cf40..e1e3059cf34b 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
| @@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) | |||
| 119 | */ | 119 | */ |
| 120 | notrace void __init machine_init(unsigned long dt_ptr) | 120 | notrace void __init machine_init(unsigned long dt_ptr) |
| 121 | { | 121 | { |
| 122 | lockdep_init(); | ||
| 123 | |||
| 122 | /* Enable early debugging if any specified (see udbg.h) */ | 124 | /* Enable early debugging if any specified (see udbg.h) */ |
| 123 | udbg_early_init(); | 125 | udbg_early_init(); |
| 124 | 126 | ||
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 65484b2200b3..0b47de07302d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
| @@ -68,7 +68,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map); | |||
| 68 | /* SMP operations for this machine */ | 68 | /* SMP operations for this machine */ |
| 69 | struct smp_ops_t *smp_ops; | 69 | struct smp_ops_t *smp_ops; |
| 70 | 70 | ||
| 71 | static volatile unsigned int cpu_callin_map[NR_CPUS]; | 71 | /* Can't be static due to PowerMac hackery */ |
| 72 | volatile unsigned int cpu_callin_map[NR_CPUS]; | ||
| 72 | 73 | ||
| 73 | int smt_enabled_at_boot = 1; | 74 | int smt_enabled_at_boot = 1; |
| 74 | 75 | ||
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index 0362a891e54e..acb74a17bbbf 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c | |||
| @@ -219,7 +219,7 @@ void udbg_init_pas_realmode(void) | |||
| 219 | #ifdef CONFIG_PPC_EARLY_DEBUG_44x | 219 | #ifdef CONFIG_PPC_EARLY_DEBUG_44x |
| 220 | #include <platforms/44x/44x.h> | 220 | #include <platforms/44x/44x.h> |
| 221 | 221 | ||
| 222 | static int udbg_44x_as1_flush(void) | 222 | static void udbg_44x_as1_flush(void) |
| 223 | { | 223 | { |
| 224 | if (udbg_comport) { | 224 | if (udbg_comport) { |
| 225 | while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0) | 225 | while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0) |
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 2d2192e48de7..3e68363405b7 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile | |||
| @@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_MM_SLICES) += slice.o | |||
| 30 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 30 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
| 31 | obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o | 31 | obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o |
| 32 | obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o | 32 | obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o |
| 33 | obj-$(CONFIG_HIGHMEM) += highmem.o | ||
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c new file mode 100644 index 000000000000..c2186c74c85a --- /dev/null +++ b/arch/powerpc/mm/highmem.c | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | /* | ||
| 2 | * highmem.c: virtual kernel memory mappings for high memory | ||
| 3 | * | ||
| 4 | * PowerPC version, stolen from the i386 version. | ||
| 5 | * | ||
| 6 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
| 7 | * are not addressable by direct kernel virtual addresses. | ||
| 8 | * | ||
| 9 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
| 10 | * Gerhard.Wichert@pdb.siemens.de | ||
| 11 | * | ||
| 12 | * | ||
| 13 | * Redesigned the x86 32-bit VM architecture to deal with | ||
| 14 | * up to 16 Terabyte physical memory. With current x86 CPUs | ||
| 15 | * we now support up to 64 Gigabytes physical RAM. | ||
| 16 | * | ||
| 17 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | ||
| 18 | * | ||
| 19 | * Reworked for PowerPC by various contributors. Moved from | ||
| 20 | * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <linux/highmem.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | |||
| 26 | /* | ||
| 27 | * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap | ||
| 28 | * gives a more generic (and caching) interface. But kmap_atomic can | ||
| 29 | * be used in IRQ contexts, so in some (very limited) cases we need | ||
| 30 | * it. | ||
| 31 | */ | ||
| 32 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | ||
| 33 | { | ||
| 34 | unsigned int idx; | ||
| 35 | unsigned long vaddr; | ||
| 36 | |||
| 37 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | ||
| 38 | pagefault_disable(); | ||
| 39 | if (!PageHighMem(page)) | ||
| 40 | return page_address(page); | ||
| 41 | |||
| 42 | debug_kmap_atomic(type); | ||
| 43 | idx = type + KM_TYPE_NR*smp_processor_id(); | ||
| 44 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
| 45 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 46 | BUG_ON(!pte_none(*(kmap_pte-idx))); | ||
| 47 | #endif | ||
| 48 | __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); | ||
| 49 | local_flush_tlb_page(NULL, vaddr); | ||
| 50 | |||
| 51 | return (void*) vaddr; | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL(kmap_atomic_prot); | ||
| 54 | |||
| 55 | void kunmap_atomic(void *kvaddr, enum km_type type) | ||
| 56 | { | ||
| 57 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 58 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | ||
| 59 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | ||
| 60 | |||
| 61 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { | ||
| 62 | pagefault_enable(); | ||
| 63 | return; | ||
| 64 | } | ||
| 65 | |||
| 66 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | ||
| 67 | |||
| 68 | /* | ||
| 69 | * force other mappings to Oops if they'll try to access | ||
| 70 | * this pte without first remapping it | ||
| 71 | */ | ||
| 72 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | ||
| 73 | local_flush_tlb_page(NULL, vaddr); | ||
| 74 | #endif | ||
| 75 | pagefault_enable(); | ||
| 76 | } | ||
| 77 | EXPORT_SYMBOL(kunmap_atomic); | ||
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c index 42e09a9f77e2..0362c88f47d7 100644 --- a/arch/powerpc/platforms/44x/warp.c +++ b/arch/powerpc/platforms/44x/warp.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
| 18 | #include <linux/of_gpio.h> | 18 | #include <linux/of_gpio.h> |
| 19 | #include <linux/of_i2c.h> | ||
| 19 | 20 | ||
| 20 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
| 21 | #include <asm/prom.h> | 22 | #include <asm/prom.h> |
| @@ -65,7 +66,6 @@ define_machine(warp) { | |||
| 65 | 66 | ||
| 66 | static u32 post_info; | 67 | static u32 post_info; |
| 67 | 68 | ||
| 68 | /* I am not sure this is the best place for this... */ | ||
| 69 | static int __init warp_post_info(void) | 69 | static int __init warp_post_info(void) |
| 70 | { | 70 | { |
| 71 | struct device_node *np; | 71 | struct device_node *np; |
| @@ -194,9 +194,9 @@ static int pika_setup_leds(void) | |||
| 194 | return 0; | 194 | return 0; |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | static void pika_setup_critical_temp(struct i2c_client *client) | 197 | static void pika_setup_critical_temp(struct device_node *np, |
| 198 | struct i2c_client *client) | ||
| 198 | { | 199 | { |
| 199 | struct device_node *np; | ||
| 200 | int irq, rc; | 200 | int irq, rc; |
| 201 | 201 | ||
| 202 | /* Do this before enabling critical temp interrupt since we | 202 | /* Do this before enabling critical temp interrupt since we |
| @@ -208,14 +208,7 @@ static void pika_setup_critical_temp(struct i2c_client *client) | |||
| 208 | i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */ | 208 | i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */ |
| 209 | i2c_smbus_write_byte_data(client, 3, 0); /* Tlow */ | 209 | i2c_smbus_write_byte_data(client, 3, 0); /* Tlow */ |
| 210 | 210 | ||
| 211 | np = of_find_compatible_node(NULL, NULL, "adi,ad7414"); | ||
| 212 | if (np == NULL) { | ||
| 213 | printk(KERN_ERR __FILE__ ": Unable to find ad7414\n"); | ||
| 214 | return; | ||
| 215 | } | ||
| 216 | |||
| 217 | irq = irq_of_parse_and_map(np, 0); | 211 | irq = irq_of_parse_and_map(np, 0); |
| 218 | of_node_put(np); | ||
| 219 | if (irq == NO_IRQ) { | 212 | if (irq == NO_IRQ) { |
| 220 | printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n"); | 213 | printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n"); |
| 221 | return; | 214 | return; |
| @@ -244,32 +237,24 @@ static inline void pika_dtm_check_fan(void __iomem *fpga) | |||
| 244 | 237 | ||
| 245 | static int pika_dtm_thread(void __iomem *fpga) | 238 | static int pika_dtm_thread(void __iomem *fpga) |
| 246 | { | 239 | { |
| 247 | struct i2c_adapter *adap; | 240 | struct device_node *np; |
| 248 | struct i2c_client *client; | 241 | struct i2c_client *client; |
| 249 | 242 | ||
| 250 | /* We loop in case either driver was compiled as a module and | 243 | np = of_find_compatible_node(NULL, NULL, "adi,ad7414"); |
| 251 | * has not been insmoded yet. | 244 | if (np == NULL) |
| 252 | */ | 245 | return -ENOENT; |
| 253 | while (!(adap = i2c_get_adapter(0))) { | ||
| 254 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 255 | schedule_timeout(HZ); | ||
| 256 | } | ||
| 257 | |||
| 258 | while (1) { | ||
| 259 | list_for_each_entry(client, &adap->clients, list) | ||
| 260 | if (client->addr == 0x4a) | ||
| 261 | goto found_it; | ||
| 262 | 246 | ||
| 263 | set_current_state(TASK_INTERRUPTIBLE); | 247 | client = of_find_i2c_device_by_node(np); |
| 264 | schedule_timeout(HZ); | 248 | if (client == NULL) { |
| 249 | of_node_put(np); | ||
| 250 | return -ENOENT; | ||
| 265 | } | 251 | } |
| 266 | 252 | ||
| 267 | found_it: | 253 | pika_setup_critical_temp(np, client); |
| 268 | pika_setup_critical_temp(client); | ||
| 269 | 254 | ||
| 270 | i2c_put_adapter(adap); | 255 | of_node_put(np); |
| 271 | 256 | ||
| 272 | printk(KERN_INFO "PIKA DTM thread running.\n"); | 257 | printk(KERN_INFO "Warp DTM thread running.\n"); |
| 273 | 258 | ||
| 274 | while (!kthread_should_stop()) { | 259 | while (!kthread_should_stop()) { |
| 275 | int val; | 260 | int val; |
| @@ -291,7 +276,6 @@ found_it: | |||
| 291 | return 0; | 276 | return 0; |
| 292 | } | 277 | } |
| 293 | 278 | ||
| 294 | |||
| 295 | static int __init pika_dtm_start(void) | 279 | static int __init pika_dtm_start(void) |
| 296 | { | 280 | { |
| 297 | struct task_struct *dtm_thread; | 281 | struct task_struct *dtm_thread; |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 77f90b356356..60ed9c067b1d 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c | |||
| @@ -285,6 +285,7 @@ static struct of_device_id mpc85xx_ids[] = { | |||
| 285 | { .type = "qe", }, | 285 | { .type = "qe", }, |
| 286 | { .compatible = "fsl,qe", }, | 286 | { .compatible = "fsl,qe", }, |
| 287 | { .compatible = "gianfar", }, | 287 | { .compatible = "gianfar", }, |
| 288 | { .compatible = "fsl,rapidio-delta", }, | ||
| 288 | {}, | 289 | {}, |
| 289 | }; | 290 | }; |
| 290 | 291 | ||
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index cc0b0db8a6f3..62c592ede641 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c | |||
| @@ -52,20 +52,19 @@ smp_85xx_kick_cpu(int nr) | |||
| 52 | 52 | ||
| 53 | pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr); | 53 | pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr); |
| 54 | 54 | ||
| 55 | local_irq_save(flags); | ||
| 56 | |||
| 57 | np = of_get_cpu_node(nr, NULL); | 55 | np = of_get_cpu_node(nr, NULL); |
| 58 | cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL); | 56 | cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL); |
| 59 | 57 | ||
| 60 | if (cpu_rel_addr == NULL) { | 58 | if (cpu_rel_addr == NULL) { |
| 61 | printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); | 59 | printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); |
| 62 | local_irq_restore(flags); | ||
| 63 | return; | 60 | return; |
| 64 | } | 61 | } |
| 65 | 62 | ||
| 66 | /* Map the spin table */ | 63 | /* Map the spin table */ |
| 67 | bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY); | 64 | bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY); |
| 68 | 65 | ||
| 66 | local_irq_save(flags); | ||
| 67 | |||
| 69 | out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); | 68 | out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); |
| 70 | out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); | 69 | out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); |
| 71 | 70 | ||
| @@ -73,10 +72,10 @@ smp_85xx_kick_cpu(int nr) | |||
| 73 | while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) | 72 | while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) |
| 74 | mdelay(1); | 73 | mdelay(1); |
| 75 | 74 | ||
| 76 | iounmap(bptr_vaddr); | ||
| 77 | |||
| 78 | local_irq_restore(flags); | 75 | local_irq_restore(flags); |
| 79 | 76 | ||
| 77 | iounmap(bptr_vaddr); | ||
| 78 | |||
| 80 | pr_debug("waited %d msecs for CPU #%d.\n", n, nr); | 79 | pr_debug("waited %d msecs for CPU #%d.\n", n, nr); |
| 81 | } | 80 | } |
| 82 | 81 | ||
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c index d0e8443b12c6..747d8fb3ab82 100644 --- a/arch/powerpc/platforms/85xx/socrates.c +++ b/arch/powerpc/platforms/85xx/socrates.c | |||
| @@ -102,10 +102,11 @@ static struct of_device_id __initdata socrates_of_bus_ids[] = { | |||
| 102 | {}, | 102 | {}, |
| 103 | }; | 103 | }; |
| 104 | 104 | ||
| 105 | static void __init socrates_init(void) | 105 | static int __init socrates_publish_devices(void) |
| 106 | { | 106 | { |
| 107 | of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL); | 107 | return of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL); |
| 108 | } | 108 | } |
| 109 | machine_device_initcall(socrates, socrates_publish_devices); | ||
| 109 | 110 | ||
| 110 | /* | 111 | /* |
| 111 | * Called very early, device-tree isn't unflattened | 112 | * Called very early, device-tree isn't unflattened |
| @@ -124,7 +125,6 @@ define_machine(socrates) { | |||
| 124 | .name = "Socrates", | 125 | .name = "Socrates", |
| 125 | .probe = socrates_probe, | 126 | .probe = socrates_probe, |
| 126 | .setup_arch = socrates_setup_arch, | 127 | .setup_arch = socrates_setup_arch, |
| 127 | .init = socrates_init, | ||
| 128 | .init_IRQ = socrates_pic_init, | 128 | .init_IRQ = socrates_pic_init, |
| 129 | .get_irq = mpic_get_irq, | 129 | .get_irq = mpic_get_irq, |
| 130 | .restart = fsl_rstcr_restart, | 130 | .restart = fsl_rstcr_restart, |
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c index ee01532786e4..1b426050a2f9 100644 --- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c +++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c | |||
| @@ -32,7 +32,6 @@ | |||
| 32 | 32 | ||
| 33 | #include <sysdev/fsl_soc.h> | 33 | #include <sysdev/fsl_soc.h> |
| 34 | #include <sysdev/fsl_pci.h> | 34 | #include <sysdev/fsl_pci.h> |
| 35 | #include <linux/of_platform.h> | ||
| 36 | 35 | ||
| 37 | /* A few bit definitions needed for fixups on some boards */ | 36 | /* A few bit definitions needed for fixups on some boards */ |
| 38 | #define MPC85xx_L2CTL_L2E 0x80000000 /* L2 enable */ | 37 | #define MPC85xx_L2CTL_L2E 0x80000000 /* L2 enable */ |
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index 9046803c8276..bc97fada48c6 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c | |||
| @@ -36,7 +36,6 @@ | |||
| 36 | #include <asm/prom.h> | 36 | #include <asm/prom.h> |
| 37 | #include <asm/smp.h> | 37 | #include <asm/smp.h> |
| 38 | #include <asm/paca.h> | 38 | #include <asm/paca.h> |
| 39 | #include <asm/time.h> | ||
| 40 | #include <asm/machdep.h> | 39 | #include <asm/machdep.h> |
| 41 | #include <asm/cputable.h> | 40 | #include <asm/cputable.h> |
| 42 | #include <asm/firmware.h> | 41 | #include <asm/firmware.h> |
| @@ -140,31 +139,6 @@ static void __devinit smp_cell_setup_cpu(int cpu) | |||
| 140 | mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); | 139 | mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); |
| 141 | } | 140 | } |
| 142 | 141 | ||
| 143 | static DEFINE_SPINLOCK(timebase_lock); | ||
| 144 | static unsigned long timebase = 0; | ||
| 145 | |||
| 146 | static void __devinit cell_give_timebase(void) | ||
| 147 | { | ||
| 148 | spin_lock(&timebase_lock); | ||
| 149 | rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); | ||
| 150 | timebase = get_tb(); | ||
| 151 | spin_unlock(&timebase_lock); | ||
| 152 | |||
| 153 | while (timebase) | ||
| 154 | barrier(); | ||
| 155 | rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL); | ||
| 156 | } | ||
| 157 | |||
| 158 | static void __devinit cell_take_timebase(void) | ||
| 159 | { | ||
| 160 | while (!timebase) | ||
| 161 | barrier(); | ||
| 162 | spin_lock(&timebase_lock); | ||
| 163 | set_tb(timebase >> 32, timebase & 0xffffffff); | ||
| 164 | timebase = 0; | ||
| 165 | spin_unlock(&timebase_lock); | ||
| 166 | } | ||
| 167 | |||
| 168 | static void __devinit smp_cell_kick_cpu(int nr) | 142 | static void __devinit smp_cell_kick_cpu(int nr) |
| 169 | { | 143 | { |
| 170 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 144 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
| @@ -224,8 +198,8 @@ void __init smp_init_cell(void) | |||
| 224 | 198 | ||
| 225 | /* Non-lpar has additional take/give timebase */ | 199 | /* Non-lpar has additional take/give timebase */ |
| 226 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { | 200 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { |
| 227 | smp_ops->give_timebase = cell_give_timebase; | 201 | smp_ops->give_timebase = rtas_give_timebase; |
| 228 | smp_ops->take_timebase = cell_take_timebase; | 202 | smp_ops->take_timebase = rtas_take_timebase; |
| 229 | } | 203 | } |
| 230 | 204 | ||
| 231 | DBG(" <- smp_init_cell()\n"); | 205 | DBG(" <- smp_init_cell()\n"); |
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index 10a4a4d063b6..02cafecc90e3 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
| 27 | #include <asm/prom.h> | 27 | #include <asm/prom.h> |
| 28 | #include <asm/smp.h> | 28 | #include <asm/smp.h> |
| 29 | #include <asm/time.h> | ||
| 30 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
| 31 | #include <asm/mpic.h> | 30 | #include <asm/mpic.h> |
| 32 | #include <asm/rtas.h> | 31 | #include <asm/rtas.h> |
| @@ -42,40 +41,12 @@ static void __devinit smp_chrp_setup_cpu(int cpu_nr) | |||
| 42 | mpic_setup_this_cpu(); | 41 | mpic_setup_this_cpu(); |
| 43 | } | 42 | } |
| 44 | 43 | ||
| 45 | static DEFINE_SPINLOCK(timebase_lock); | ||
| 46 | static unsigned int timebase_upper = 0, timebase_lower = 0; | ||
| 47 | |||
| 48 | void __devinit smp_chrp_give_timebase(void) | ||
| 49 | { | ||
| 50 | spin_lock(&timebase_lock); | ||
| 51 | rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); | ||
| 52 | timebase_upper = get_tbu(); | ||
| 53 | timebase_lower = get_tbl(); | ||
| 54 | spin_unlock(&timebase_lock); | ||
| 55 | |||
| 56 | while (timebase_upper || timebase_lower) | ||
| 57 | barrier(); | ||
| 58 | rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL); | ||
| 59 | } | ||
| 60 | |||
| 61 | void __devinit smp_chrp_take_timebase(void) | ||
| 62 | { | ||
| 63 | while (!(timebase_upper || timebase_lower)) | ||
| 64 | barrier(); | ||
| 65 | spin_lock(&timebase_lock); | ||
| 66 | set_tb(timebase_upper, timebase_lower); | ||
| 67 | timebase_upper = 0; | ||
| 68 | timebase_lower = 0; | ||
| 69 | spin_unlock(&timebase_lock); | ||
| 70 | printk("CPU %i taken timebase\n", smp_processor_id()); | ||
| 71 | } | ||
| 72 | |||
| 73 | /* CHRP with openpic */ | 44 | /* CHRP with openpic */ |
| 74 | struct smp_ops_t chrp_smp_ops = { | 45 | struct smp_ops_t chrp_smp_ops = { |
| 75 | .message_pass = smp_mpic_message_pass, | 46 | .message_pass = smp_mpic_message_pass, |
| 76 | .probe = smp_mpic_probe, | 47 | .probe = smp_mpic_probe, |
| 77 | .kick_cpu = smp_chrp_kick_cpu, | 48 | .kick_cpu = smp_chrp_kick_cpu, |
| 78 | .setup_cpu = smp_chrp_setup_cpu, | 49 | .setup_cpu = smp_chrp_setup_cpu, |
| 79 | .give_timebase = smp_chrp_give_timebase, | 50 | .give_timebase = rtas_give_timebase, |
| 80 | .take_timebase = smp_chrp_take_timebase, | 51 | .take_timebase = rtas_take_timebase, |
| 81 | }; | 52 | }; |
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 153051eb6d93..a4619347aa7e 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c | |||
| @@ -71,20 +71,25 @@ static void pas_restart(char *cmd) | |||
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | #ifdef CONFIG_SMP | 73 | #ifdef CONFIG_SMP |
| 74 | static DEFINE_SPINLOCK(timebase_lock); | 74 | static raw_spinlock_t timebase_lock; |
| 75 | static unsigned long timebase; | 75 | static unsigned long timebase; |
| 76 | 76 | ||
| 77 | static void __devinit pas_give_timebase(void) | 77 | static void __devinit pas_give_timebase(void) |
| 78 | { | 78 | { |
| 79 | spin_lock(&timebase_lock); | 79 | unsigned long flags; |
| 80 | |||
| 81 | local_irq_save(flags); | ||
| 82 | hard_irq_disable(); | ||
| 83 | __raw_spin_lock(&timebase_lock); | ||
| 80 | mtspr(SPRN_TBCTL, TBCTL_FREEZE); | 84 | mtspr(SPRN_TBCTL, TBCTL_FREEZE); |
| 81 | isync(); | 85 | isync(); |
| 82 | timebase = get_tb(); | 86 | timebase = get_tb(); |
| 83 | spin_unlock(&timebase_lock); | 87 | __raw_spin_unlock(&timebase_lock); |
| 84 | 88 | ||
| 85 | while (timebase) | 89 | while (timebase) |
| 86 | barrier(); | 90 | barrier(); |
| 87 | mtspr(SPRN_TBCTL, TBCTL_RESTART); | 91 | mtspr(SPRN_TBCTL, TBCTL_RESTART); |
| 92 | local_irq_restore(flags); | ||
| 88 | } | 93 | } |
| 89 | 94 | ||
| 90 | static void __devinit pas_take_timebase(void) | 95 | static void __devinit pas_take_timebase(void) |
| @@ -92,10 +97,10 @@ static void __devinit pas_take_timebase(void) | |||
| 92 | while (!timebase) | 97 | while (!timebase) |
| 93 | smp_rmb(); | 98 | smp_rmb(); |
| 94 | 99 | ||
| 95 | spin_lock(&timebase_lock); | 100 | __raw_spin_lock(&timebase_lock); |
| 96 | set_tb(timebase >> 32, timebase & 0xffffffff); | 101 | set_tb(timebase >> 32, timebase & 0xffffffff); |
| 97 | timebase = 0; | 102 | timebase = 0; |
| 98 | spin_unlock(&timebase_lock); | 103 | __raw_spin_unlock(&timebase_lock); |
| 99 | } | 104 | } |
| 100 | 105 | ||
| 101 | struct smp_ops_t pas_smp_ops = { | 106 | struct smp_ops_t pas_smp_ops = { |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 86f69a4eb49b..c20522656367 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
| @@ -103,11 +103,6 @@ unsigned long smu_cmdbuf_abs; | |||
| 103 | EXPORT_SYMBOL(smu_cmdbuf_abs); | 103 | EXPORT_SYMBOL(smu_cmdbuf_abs); |
| 104 | #endif | 104 | #endif |
| 105 | 105 | ||
| 106 | #ifdef CONFIG_SMP | ||
| 107 | extern struct smp_ops_t psurge_smp_ops; | ||
| 108 | extern struct smp_ops_t core99_smp_ops; | ||
| 109 | #endif /* CONFIG_SMP */ | ||
| 110 | |||
| 111 | static void pmac_show_cpuinfo(struct seq_file *m) | 106 | static void pmac_show_cpuinfo(struct seq_file *m) |
| 112 | { | 107 | { |
| 113 | struct device_node *np; | 108 | struct device_node *np; |
| @@ -341,34 +336,6 @@ static void __init pmac_setup_arch(void) | |||
| 341 | ROOT_DEV = DEFAULT_ROOT_DEVICE; | 336 | ROOT_DEV = DEFAULT_ROOT_DEVICE; |
| 342 | #endif | 337 | #endif |
| 343 | 338 | ||
| 344 | #ifdef CONFIG_SMP | ||
| 345 | /* Check for Core99 */ | ||
| 346 | ic = of_find_node_by_name(NULL, "uni-n"); | ||
| 347 | if (!ic) | ||
| 348 | ic = of_find_node_by_name(NULL, "u3"); | ||
| 349 | if (!ic) | ||
| 350 | ic = of_find_node_by_name(NULL, "u4"); | ||
| 351 | if (ic) { | ||
| 352 | of_node_put(ic); | ||
| 353 | smp_ops = &core99_smp_ops; | ||
| 354 | } | ||
| 355 | #ifdef CONFIG_PPC32 | ||
| 356 | else { | ||
| 357 | /* | ||
| 358 | * We have to set bits in cpu_possible_map here since the | ||
| 359 | * secondary CPU(s) aren't in the device tree, and | ||
| 360 | * setup_per_cpu_areas only allocates per-cpu data for | ||
| 361 | * CPUs in the cpu_possible_map. | ||
| 362 | */ | ||
| 363 | int cpu; | ||
| 364 | |||
| 365 | for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu) | ||
| 366 | cpu_set(cpu, cpu_possible_map); | ||
| 367 | smp_ops = &psurge_smp_ops; | ||
| 368 | } | ||
| 369 | #endif | ||
| 370 | #endif /* CONFIG_SMP */ | ||
| 371 | |||
| 372 | #ifdef CONFIG_ADB | 339 | #ifdef CONFIG_ADB |
| 373 | if (strstr(cmd_line, "adb_sync")) { | 340 | if (strstr(cmd_line, "adb_sync")) { |
| 374 | extern int __adb_probe_sync; | 341 | extern int __adb_probe_sync; |
| @@ -512,6 +479,14 @@ static void __init pmac_init_early(void) | |||
| 512 | #ifdef CONFIG_PPC64 | 479 | #ifdef CONFIG_PPC64 |
| 513 | iommu_init_early_dart(); | 480 | iommu_init_early_dart(); |
| 514 | #endif | 481 | #endif |
| 482 | |||
| 483 | /* SMP Init has to be done early as we need to patch up | ||
| 484 | * cpu_possible_map before interrupt stacks are allocated | ||
| 485 | * or kaboom... | ||
| 486 | */ | ||
| 487 | #ifdef CONFIG_SMP | ||
| 488 | pmac_setup_smp(); | ||
| 489 | #endif | ||
| 515 | } | 490 | } |
| 516 | 491 | ||
| 517 | static int __init pmac_declare_of_platform_devices(void) | 492 | static int __init pmac_declare_of_platform_devices(void) |
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index cf1dbe758890..6d4da7b46b41 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c | |||
| @@ -64,10 +64,11 @@ | |||
| 64 | extern void __secondary_start_pmac_0(void); | 64 | extern void __secondary_start_pmac_0(void); |
| 65 | extern int pmac_pfunc_base_install(void); | 65 | extern int pmac_pfunc_base_install(void); |
| 66 | 66 | ||
| 67 | #ifdef CONFIG_PPC32 | 67 | static void (*pmac_tb_freeze)(int freeze); |
| 68 | static u64 timebase; | ||
| 69 | static int tb_req; | ||
| 68 | 70 | ||
| 69 | /* Sync flag for HW tb sync */ | 71 | #ifdef CONFIG_PPC32 |
| 70 | static volatile int sec_tb_reset = 0; | ||
| 71 | 72 | ||
| 72 | /* | 73 | /* |
| 73 | * Powersurge (old powermac SMP) support. | 74 | * Powersurge (old powermac SMP) support. |
| @@ -294,6 +295,9 @@ static int __init smp_psurge_probe(void) | |||
| 294 | psurge_quad_init(); | 295 | psurge_quad_init(); |
| 295 | /* All released cards using this HW design have 4 CPUs */ | 296 | /* All released cards using this HW design have 4 CPUs */ |
| 296 | ncpus = 4; | 297 | ncpus = 4; |
| 298 | /* Not sure how timebase sync works on those, let's use SW */ | ||
| 299 | smp_ops->give_timebase = smp_generic_give_timebase; | ||
| 300 | smp_ops->take_timebase = smp_generic_take_timebase; | ||
| 297 | } else { | 301 | } else { |
| 298 | iounmap(quad_base); | 302 | iounmap(quad_base); |
| 299 | if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) { | 303 | if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) { |
| @@ -308,18 +312,15 @@ static int __init smp_psurge_probe(void) | |||
| 308 | psurge_start = ioremap(PSURGE_START, 4); | 312 | psurge_start = ioremap(PSURGE_START, 4); |
| 309 | psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); | 313 | psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); |
| 310 | 314 | ||
| 311 | /* | 315 | /* This is necessary because OF doesn't know about the |
| 312 | * This is necessary because OF doesn't know about the | ||
| 313 | * secondary cpu(s), and thus there aren't nodes in the | 316 | * secondary cpu(s), and thus there aren't nodes in the |
| 314 | * device tree for them, and smp_setup_cpu_maps hasn't | 317 | * device tree for them, and smp_setup_cpu_maps hasn't |
| 315 | * set their bits in cpu_possible_map and cpu_present_map. | 318 | * set their bits in cpu_present_map. |
| 316 | */ | 319 | */ |
| 317 | if (ncpus > NR_CPUS) | 320 | if (ncpus > NR_CPUS) |
| 318 | ncpus = NR_CPUS; | 321 | ncpus = NR_CPUS; |
| 319 | for (i = 1; i < ncpus ; ++i) { | 322 | for (i = 1; i < ncpus ; ++i) |
| 320 | cpu_set(i, cpu_present_map); | 323 | cpu_set(i, cpu_present_map); |
| 321 | set_hard_smp_processor_id(i, i); | ||
| 322 | } | ||
| 323 | 324 | ||
| 324 | if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352); | 325 | if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352); |
| 325 | 326 | ||
| @@ -329,8 +330,14 @@ static int __init smp_psurge_probe(void) | |||
| 329 | static void __init smp_psurge_kick_cpu(int nr) | 330 | static void __init smp_psurge_kick_cpu(int nr) |
| 330 | { | 331 | { |
| 331 | unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; | 332 | unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; |
| 332 | unsigned long a; | 333 | unsigned long a, flags; |
| 333 | int i; | 334 | int i, j; |
| 335 | |||
| 336 | /* Defining this here is evil ... but I prefer hiding that | ||
| 337 | * crap to avoid giving people ideas that they can do the | ||
| 338 | * same. | ||
| 339 | */ | ||
| 340 | extern volatile unsigned int cpu_callin_map[NR_CPUS]; | ||
| 334 | 341 | ||
| 335 | /* may need to flush here if secondary bats aren't setup */ | 342 | /* may need to flush here if secondary bats aren't setup */ |
| 336 | for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32) | 343 | for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32) |
| @@ -339,47 +346,52 @@ static void __init smp_psurge_kick_cpu(int nr) | |||
| 339 | 346 | ||
| 340 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353); | 347 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353); |
| 341 | 348 | ||
| 349 | /* This is going to freeze the timebase, we disable interrupts */ | ||
| 350 | local_irq_save(flags); | ||
| 351 | |||
| 342 | out_be32(psurge_start, start); | 352 | out_be32(psurge_start, start); |
| 343 | mb(); | 353 | mb(); |
| 344 | 354 | ||
| 345 | psurge_set_ipi(nr); | 355 | psurge_set_ipi(nr); |
| 356 | |||
| 346 | /* | 357 | /* |
| 347 | * We can't use udelay here because the timebase is now frozen. | 358 | * We can't use udelay here because the timebase is now frozen. |
| 348 | */ | 359 | */ |
| 349 | for (i = 0; i < 2000; ++i) | 360 | for (i = 0; i < 2000; ++i) |
| 350 | barrier(); | 361 | asm volatile("nop" : : : "memory"); |
| 351 | psurge_clr_ipi(nr); | 362 | psurge_clr_ipi(nr); |
| 352 | 363 | ||
| 353 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); | 364 | /* |
| 354 | } | 365 | * Also, because the timebase is frozen, we must not return to the |
| 355 | 366 | * caller which will try to do udelay's etc... Instead, we wait -here- | |
| 356 | /* | 367 | * for the CPU to callin. |
| 357 | * With the dual-cpu powersurge board, the decrementers and timebases | 368 | */ |
| 358 | * of both cpus are frozen after the secondary cpu is started up, | 369 | for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) { |
| 359 | * until we give the secondary cpu another interrupt. This routine | 370 | for (j = 1; j < 10000; j++) |
| 360 | * uses this to get the timebases synchronized. | 371 | asm volatile("nop" : : : "memory"); |
| 361 | * -- paulus. | 372 | asm volatile("sync" : : : "memory"); |
| 362 | */ | 373 | } |
| 363 | static void __init psurge_dual_sync_tb(int cpu_nr) | 374 | if (!cpu_callin_map[nr]) |
| 364 | { | 375 | goto stuck; |
| 365 | int t; | 376 | |
| 366 | 377 | /* And we do the TB sync here too for standard dual CPU cards */ | |
| 367 | set_dec(tb_ticks_per_jiffy); | 378 | if (psurge_type == PSURGE_DUAL) { |
| 368 | /* XXX fixme */ | 379 | while(!tb_req) |
| 369 | set_tb(0, 0); | 380 | barrier(); |
| 370 | 381 | tb_req = 0; | |
| 371 | if (cpu_nr > 0) { | 382 | mb(); |
| 383 | timebase = get_tb(); | ||
| 384 | mb(); | ||
| 385 | while (timebase) | ||
| 386 | barrier(); | ||
| 372 | mb(); | 387 | mb(); |
| 373 | sec_tb_reset = 1; | ||
| 374 | return; | ||
| 375 | } | 388 | } |
| 389 | stuck: | ||
| 390 | /* now interrupt the secondary, restarting both TBs */ | ||
| 391 | if (psurge_type == PSURGE_DUAL) | ||
| 392 | psurge_set_ipi(1); | ||
| 376 | 393 | ||
| 377 | /* wait for the secondary to have reset its TB before proceeding */ | 394 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); |
| 378 | for (t = 10000000; t > 0 && !sec_tb_reset; --t) | ||
| 379 | ; | ||
| 380 | |||
| 381 | /* now interrupt the secondary, starting both TBs */ | ||
| 382 | psurge_set_ipi(1); | ||
| 383 | } | 395 | } |
| 384 | 396 | ||
| 385 | static struct irqaction psurge_irqaction = { | 397 | static struct irqaction psurge_irqaction = { |
| @@ -390,36 +402,35 @@ static struct irqaction psurge_irqaction = { | |||
| 390 | 402 | ||
| 391 | static void __init smp_psurge_setup_cpu(int cpu_nr) | 403 | static void __init smp_psurge_setup_cpu(int cpu_nr) |
| 392 | { | 404 | { |
| 405 | if (cpu_nr != 0) | ||
| 406 | return; | ||
| 393 | 407 | ||
| 394 | if (cpu_nr == 0) { | 408 | /* reset the entry point so if we get another intr we won't |
| 395 | /* If we failed to start the second CPU, we should still | 409 | * try to startup again */ |
| 396 | * send it an IPI to start the timebase & DEC or we might | 410 | out_be32(psurge_start, 0x100); |
| 397 | * have them stuck. | 411 | if (setup_irq(30, &psurge_irqaction)) |
| 398 | */ | 412 | printk(KERN_ERR "Couldn't get primary IPI interrupt"); |
| 399 | if (num_online_cpus() < 2) { | ||
| 400 | if (psurge_type == PSURGE_DUAL) | ||
| 401 | psurge_set_ipi(1); | ||
| 402 | return; | ||
| 403 | } | ||
| 404 | /* reset the entry point so if we get another intr we won't | ||
| 405 | * try to startup again */ | ||
| 406 | out_be32(psurge_start, 0x100); | ||
| 407 | if (setup_irq(30, &psurge_irqaction)) | ||
| 408 | printk(KERN_ERR "Couldn't get primary IPI interrupt"); | ||
| 409 | } | ||
| 410 | |||
| 411 | if (psurge_type == PSURGE_DUAL) | ||
| 412 | psurge_dual_sync_tb(cpu_nr); | ||
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | void __init smp_psurge_take_timebase(void) | 415 | void __init smp_psurge_take_timebase(void) |
| 416 | { | 416 | { |
| 417 | /* Dummy implementation */ | 417 | if (psurge_type != PSURGE_DUAL) |
| 418 | return; | ||
| 419 | |||
| 420 | tb_req = 1; | ||
| 421 | mb(); | ||
| 422 | while (!timebase) | ||
| 423 | barrier(); | ||
| 424 | mb(); | ||
| 425 | set_tb(timebase >> 32, timebase & 0xffffffff); | ||
| 426 | timebase = 0; | ||
| 427 | mb(); | ||
| 428 | set_dec(tb_ticks_per_jiffy/2); | ||
| 418 | } | 429 | } |
| 419 | 430 | ||
| 420 | void __init smp_psurge_give_timebase(void) | 431 | void __init smp_psurge_give_timebase(void) |
| 421 | { | 432 | { |
| 422 | /* Dummy implementation */ | 433 | /* Nothing to do here */ |
| 423 | } | 434 | } |
| 424 | 435 | ||
| 425 | /* PowerSurge-style Macs */ | 436 | /* PowerSurge-style Macs */ |
| @@ -437,9 +448,6 @@ struct smp_ops_t psurge_smp_ops = { | |||
| 437 | * Core 99 and later support | 448 | * Core 99 and later support |
| 438 | */ | 449 | */ |
| 439 | 450 | ||
| 440 | static void (*pmac_tb_freeze)(int freeze); | ||
| 441 | static u64 timebase; | ||
| 442 | static int tb_req; | ||
| 443 | 451 | ||
| 444 | static void smp_core99_give_timebase(void) | 452 | static void smp_core99_give_timebase(void) |
| 445 | { | 453 | { |
| @@ -478,7 +486,6 @@ static void __devinit smp_core99_take_timebase(void) | |||
| 478 | set_tb(timebase >> 32, timebase & 0xffffffff); | 486 | set_tb(timebase >> 32, timebase & 0xffffffff); |
| 479 | timebase = 0; | 487 | timebase = 0; |
| 480 | mb(); | 488 | mb(); |
| 481 | set_dec(tb_ticks_per_jiffy/2); | ||
| 482 | 489 | ||
| 483 | local_irq_restore(flags); | 490 | local_irq_restore(flags); |
| 484 | } | 491 | } |
| @@ -920,3 +927,34 @@ struct smp_ops_t core99_smp_ops = { | |||
| 920 | # endif | 927 | # endif |
| 921 | #endif | 928 | #endif |
| 922 | }; | 929 | }; |
| 930 | |||
| 931 | void __init pmac_setup_smp(void) | ||
| 932 | { | ||
| 933 | struct device_node *np; | ||
| 934 | |||
| 935 | /* Check for Core99 */ | ||
| 936 | np = of_find_node_by_name(NULL, "uni-n"); | ||
| 937 | if (!np) | ||
| 938 | np = of_find_node_by_name(NULL, "u3"); | ||
| 939 | if (!np) | ||
| 940 | np = of_find_node_by_name(NULL, "u4"); | ||
| 941 | if (np) { | ||
| 942 | of_node_put(np); | ||
| 943 | smp_ops = &core99_smp_ops; | ||
| 944 | } | ||
| 945 | #ifdef CONFIG_PPC32 | ||
| 946 | else { | ||
| 947 | /* We have to set bits in cpu_possible_map here since the | ||
| 948 | * secondary CPU(s) aren't in the device tree. Various | ||
| 949 | * things won't be initialized for CPUs not in the possible | ||
| 950 | * map, so we really need to fix it up here. | ||
| 951 | */ | ||
| 952 | int cpu; | ||
| 953 | |||
| 954 | for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu) | ||
| 955 | cpu_set(cpu, cpu_possible_map); | ||
| 956 | smp_ops = &psurge_smp_ops; | ||
| 957 | } | ||
| 958 | #endif /* CONFIG_PPC32 */ | ||
| 959 | } | ||
| 960 | |||
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 1a231c389ba0..1f8f6cfb94f7 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
| @@ -35,7 +35,6 @@ | |||
| 35 | #include <asm/prom.h> | 35 | #include <asm/prom.h> |
| 36 | #include <asm/smp.h> | 36 | #include <asm/smp.h> |
| 37 | #include <asm/paca.h> | 37 | #include <asm/paca.h> |
| 38 | #include <asm/time.h> | ||
| 39 | #include <asm/machdep.h> | 38 | #include <asm/machdep.h> |
| 40 | #include <asm/cputable.h> | 39 | #include <asm/cputable.h> |
| 41 | #include <asm/firmware.h> | 40 | #include <asm/firmware.h> |
| @@ -118,31 +117,6 @@ static void __devinit smp_xics_setup_cpu(int cpu) | |||
| 118 | } | 117 | } |
| 119 | #endif /* CONFIG_XICS */ | 118 | #endif /* CONFIG_XICS */ |
| 120 | 119 | ||
| 121 | static DEFINE_SPINLOCK(timebase_lock); | ||
| 122 | static unsigned long timebase = 0; | ||
| 123 | |||
| 124 | static void __devinit pSeries_give_timebase(void) | ||
| 125 | { | ||
| 126 | spin_lock(&timebase_lock); | ||
| 127 | rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); | ||
| 128 | timebase = get_tb(); | ||
| 129 | spin_unlock(&timebase_lock); | ||
| 130 | |||
| 131 | while (timebase) | ||
| 132 | barrier(); | ||
| 133 | rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL); | ||
| 134 | } | ||
| 135 | |||
| 136 | static void __devinit pSeries_take_timebase(void) | ||
| 137 | { | ||
| 138 | while (!timebase) | ||
| 139 | barrier(); | ||
| 140 | spin_lock(&timebase_lock); | ||
| 141 | set_tb(timebase >> 32, timebase & 0xffffffff); | ||
| 142 | timebase = 0; | ||
| 143 | spin_unlock(&timebase_lock); | ||
| 144 | } | ||
| 145 | |||
| 146 | static void __devinit smp_pSeries_kick_cpu(int nr) | 120 | static void __devinit smp_pSeries_kick_cpu(int nr) |
| 147 | { | 121 | { |
| 148 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 122 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
| @@ -209,8 +183,8 @@ static void __init smp_init_pseries(void) | |||
| 209 | 183 | ||
| 210 | /* Non-lpar has additional take/give timebase */ | 184 | /* Non-lpar has additional take/give timebase */ |
| 211 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { | 185 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { |
| 212 | smp_ops->give_timebase = pSeries_give_timebase; | 186 | smp_ops->give_timebase = rtas_give_timebase; |
| 213 | smp_ops->take_timebase = pSeries_take_timebase; | 187 | smp_ops->take_timebase = rtas_take_timebase; |
| 214 | } | 188 | } |
| 215 | 189 | ||
| 216 | pr_debug(" <- smp_init_pSeries()\n"); | 190 | pr_debug(" <- smp_init_pSeries()\n"); |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 9c3af5045495..d46de1f0f3ee 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
| @@ -279,28 +279,29 @@ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr, | |||
| 279 | } | 279 | } |
| 280 | 280 | ||
| 281 | #ifdef CONFIG_PPC_DCR | 281 | #ifdef CONFIG_PPC_DCR |
| 282 | static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb, | 282 | static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node, |
| 283 | struct mpic_reg_bank *rb, | ||
| 283 | unsigned int offset, unsigned int size) | 284 | unsigned int offset, unsigned int size) |
| 284 | { | 285 | { |
| 285 | const u32 *dbasep; | 286 | const u32 *dbasep; |
| 286 | 287 | ||
| 287 | dbasep = of_get_property(mpic->irqhost->of_node, "dcr-reg", NULL); | 288 | dbasep = of_get_property(node, "dcr-reg", NULL); |
| 288 | 289 | ||
| 289 | rb->dhost = dcr_map(mpic->irqhost->of_node, *dbasep + offset, size); | 290 | rb->dhost = dcr_map(node, *dbasep + offset, size); |
| 290 | BUG_ON(!DCR_MAP_OK(rb->dhost)); | 291 | BUG_ON(!DCR_MAP_OK(rb->dhost)); |
| 291 | } | 292 | } |
| 292 | 293 | ||
| 293 | static inline void mpic_map(struct mpic *mpic, phys_addr_t phys_addr, | 294 | static inline void mpic_map(struct mpic *mpic, struct device_node *node, |
| 294 | struct mpic_reg_bank *rb, unsigned int offset, | 295 | phys_addr_t phys_addr, struct mpic_reg_bank *rb, |
| 295 | unsigned int size) | 296 | unsigned int offset, unsigned int size) |
| 296 | { | 297 | { |
| 297 | if (mpic->flags & MPIC_USES_DCR) | 298 | if (mpic->flags & MPIC_USES_DCR) |
| 298 | _mpic_map_dcr(mpic, rb, offset, size); | 299 | _mpic_map_dcr(mpic, node, rb, offset, size); |
| 299 | else | 300 | else |
| 300 | _mpic_map_mmio(mpic, phys_addr, rb, offset, size); | 301 | _mpic_map_mmio(mpic, phys_addr, rb, offset, size); |
| 301 | } | 302 | } |
| 302 | #else /* CONFIG_PPC_DCR */ | 303 | #else /* CONFIG_PPC_DCR */ |
| 303 | #define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) | 304 | #define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) |
| 304 | #endif /* !CONFIG_PPC_DCR */ | 305 | #endif /* !CONFIG_PPC_DCR */ |
| 305 | 306 | ||
| 306 | 307 | ||
| @@ -1052,11 +1053,10 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
| 1052 | int intvec_top; | 1053 | int intvec_top; |
| 1053 | u64 paddr = phys_addr; | 1054 | u64 paddr = phys_addr; |
| 1054 | 1055 | ||
| 1055 | mpic = alloc_bootmem(sizeof(struct mpic)); | 1056 | mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL); |
| 1056 | if (mpic == NULL) | 1057 | if (mpic == NULL) |
| 1057 | return NULL; | 1058 | return NULL; |
| 1058 | 1059 | ||
| 1059 | memset(mpic, 0, sizeof(struct mpic)); | ||
| 1060 | mpic->name = name; | 1060 | mpic->name = name; |
| 1061 | 1061 | ||
| 1062 | mpic->hc_irq = mpic_irq_chip; | 1062 | mpic->hc_irq = mpic_irq_chip; |
| @@ -1152,8 +1152,8 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
| 1152 | } | 1152 | } |
| 1153 | 1153 | ||
| 1154 | /* Map the global registers */ | 1154 | /* Map the global registers */ |
| 1155 | mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); | 1155 | mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); |
| 1156 | mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); | 1156 | mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); |
| 1157 | 1157 | ||
| 1158 | /* Reset */ | 1158 | /* Reset */ |
| 1159 | if (flags & MPIC_WANTS_RESET) { | 1159 | if (flags & MPIC_WANTS_RESET) { |
| @@ -1194,7 +1194,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
| 1194 | 1194 | ||
| 1195 | /* Map the per-CPU registers */ | 1195 | /* Map the per-CPU registers */ |
| 1196 | for (i = 0; i < mpic->num_cpus; i++) { | 1196 | for (i = 0; i < mpic->num_cpus; i++) { |
| 1197 | mpic_map(mpic, paddr, &mpic->cpuregs[i], | 1197 | mpic_map(mpic, node, paddr, &mpic->cpuregs[i], |
| 1198 | MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE), | 1198 | MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE), |
| 1199 | 0x1000); | 1199 | 0x1000); |
| 1200 | } | 1200 | } |
| @@ -1202,7 +1202,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
| 1202 | /* Initialize main ISU if none provided */ | 1202 | /* Initialize main ISU if none provided */ |
| 1203 | if (mpic->isu_size == 0) { | 1203 | if (mpic->isu_size == 0) { |
| 1204 | mpic->isu_size = mpic->num_sources; | 1204 | mpic->isu_size = mpic->num_sources; |
| 1205 | mpic_map(mpic, paddr, &mpic->isus[0], | 1205 | mpic_map(mpic, node, paddr, &mpic->isus[0], |
| 1206 | MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); | 1206 | MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); |
| 1207 | } | 1207 | } |
| 1208 | mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); | 1208 | mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); |
| @@ -1256,8 +1256,10 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | |||
| 1256 | 1256 | ||
| 1257 | BUG_ON(isu_num >= MPIC_MAX_ISU); | 1257 | BUG_ON(isu_num >= MPIC_MAX_ISU); |
| 1258 | 1258 | ||
| 1259 | mpic_map(mpic, paddr, &mpic->isus[isu_num], 0, | 1259 | mpic_map(mpic, mpic->irqhost->of_node, |
| 1260 | paddr, &mpic->isus[isu_num], 0, | ||
| 1260 | MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); | 1261 | MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); |
| 1262 | |||
| 1261 | if ((isu_first + mpic->isu_size) > mpic->num_sources) | 1263 | if ((isu_first + mpic->isu_size) > mpic->num_sources) |
| 1262 | mpic->num_sources = isu_first + mpic->isu_size; | 1264 | mpic->num_sources = isu_first + mpic->isu_size; |
| 1263 | } | 1265 | } |
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index b28b0e512d67..237e3654f48c 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c | |||
| @@ -112,6 +112,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) | |||
| 112 | { | 112 | { |
| 113 | unsigned long flags; | 113 | unsigned long flags; |
| 114 | u8 mcn_shift = 0, dev_shift = 0; | 114 | u8 mcn_shift = 0, dev_shift = 0; |
| 115 | u32 ret; | ||
| 115 | 116 | ||
| 116 | spin_lock_irqsave(&qe_lock, flags); | 117 | spin_lock_irqsave(&qe_lock, flags); |
| 117 | if (cmd == QE_RESET) { | 118 | if (cmd == QE_RESET) { |
| @@ -139,11 +140,13 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) | |||
| 139 | } | 140 | } |
| 140 | 141 | ||
| 141 | /* wait for the QE_CR_FLG to clear */ | 142 | /* wait for the QE_CR_FLG to clear */ |
| 142 | while(in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) | 143 | ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0, |
| 143 | cpu_relax(); | 144 | 100, 0); |
| 145 | /* On timeout (e.g. failure), the expression will be false (ret == 0), | ||
| 146 | otherwise it will be true (ret == 1). */ | ||
| 144 | spin_unlock_irqrestore(&qe_lock, flags); | 147 | spin_unlock_irqrestore(&qe_lock, flags); |
| 145 | 148 | ||
| 146 | return 0; | 149 | return ret == 1; |
| 147 | } | 150 | } |
| 148 | EXPORT_SYMBOL(qe_issue_cmd); | 151 | EXPORT_SYMBOL(qe_issue_cmd); |
| 149 | 152 | ||
