Diffstat (limited to 'arch/powerpc')
44 files changed, 399 insertions, 258 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d0e8a1dbf822..b6ff882f695b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -138,7 +138,6 @@ config PPC
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
 	select IRQ_PER_CPU
-	select GENERIC_HARDIRQS_NO_DEPRECATED
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
 
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 6cf9d6614805..abf74dc1f79c 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -47,6 +47,7 @@ CONFIG_MTD_NAND_NDFC=y
 CONFIG_MTD_UBI=y
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_RAM=y
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_AT24=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 6828eda02bdc..0c7de9620ea6 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -43,6 +43,7 @@ CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_SCSI_TGT=y
 CONFIG_BLK_DEV_SD=y
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index 4b2441244eab..d41857a5152d 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
 CONFIG_DS1682=y
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECS=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index a360ba44b928..38303ec11bcd 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
 CONFIG_DS1682=y
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECS=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index be2829dd129f..98533973d20f 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -138,6 +138,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
 CONFIG_DS1682=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
index 0c9c7ed7ec75..b614508d6fd2 100644
--- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
@@ -63,6 +63,7 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/e55xx_smp_defconfig
index 06f95492afc7..9fa1613e5e2b 100644
--- a/arch/powerpc/configs/e55xx_smp_defconfig
+++ b/arch/powerpc/configs/e55xx_smp_defconfig
@@ -32,6 +32,7 @@ CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_MOUSEDEV is not set
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index f39d0cf876dd..8a874b999867 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -78,6 +78,7 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=m
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index 62db8a3df162..c02bbb2fddf8 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -61,6 +61,7 @@ CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=1
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_BLK_DEV_XIP=y
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_AT24=y
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 7376e27b8ed4..e63f537b854a 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -52,6 +52,7 @@ CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_AT24=y
 CONFIG_SCSI_TGT=y
 CONFIG_BLK_DEV_SD=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 99a19d1e9bf8..c06a86c33098 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -82,6 +82,7 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index c636f23f8c92..942ced90557c 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -84,6 +84,7 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index 55b54318fef6..038a308cbfc4 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -66,6 +66,7 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index edd2d54c8196..f4deb0b78cf0 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -59,6 +59,7 @@ CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_LEGACY=y
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 9d64a6822d86..0a10fb009ef7 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -398,6 +398,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_VIRTIO_BLK=m
 CONFIG_BLK_DEV_HD=y
+CONFIG_MISC_DEVICES=y
 CONFIG_ENCLOSURE_SERVICES=m
 CONFIG_SENSORS_TSL2550=m
 CONFIG_EEPROM_AT24=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 9c3f22c6cde1..249ddd0a27cd 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -189,6 +189,7 @@ CONFIG_TIGON3=y
 CONFIG_BNX2=m
 CONFIG_CHELSIO_T1=m
 CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
 CONFIG_EHEA=y
 CONFIG_IXGBE=m
 CONFIG_IXGB=m
@@ -255,6 +256,8 @@ CONFIG_INFINIBAND_USER_MAD=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_INFINIBAND_MTHCA=m
 CONFIG_INFINIBAND_EHCA=m
+CONFIG_INFINIBAND_CXGB3=m
+CONFIG_INFINIBAND_CXGB4=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_INFINIBAND_IPOIB=m
 CONFIG_INFINIBAND_IPOIB_CM=y
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 6d2416a85709..dd70fac57ec8 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
 		size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
@@ -198,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+			     void *, dma_addr_t, size_t);
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
+
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index fe56a23e1ff0..e4f01915fbb0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -35,9 +35,9 @@ struct smp_ops_t {
 	int   (*probe)(void);
 	void  (*kick_cpu)(int nr);
 	void  (*setup_cpu)(int nr);
+	void  (*bringup_done)(void);
 	void  (*take_timebase)(void);
 	void  (*give_timebase)(void);
-	int   (*cpu_enable)(unsigned int nr);
 	int   (*cpu_disable)(void);
 	void  (*cpu_die)(unsigned int nr);
 	int   (*cpu_bootable)(unsigned int nr);
@@ -267,7 +267,6 @@ struct machdep_calls {
 
 extern void e500_idle(void);
 extern void power4_idle(void);
-extern void power4_cpu_offline_powersave(void);
 extern void ppc6xx_idle(void);
 extern void book3e_idle(void);
 
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index acac35d5b382..ae7b3efec8e5 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -27,7 +27,7 @@
 #define STE_VSID_SHIFT	12
 
 /* Location of cpu0's segment table */
-#define STAB0_PAGE	0x6
+#define STAB0_PAGE	0x8
 #define STAB0_OFFSET	(STAB0_PAGE << 12)
 #define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)
 
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index da4b20008541..2cd664ef0a5e 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -100,7 +100,7 @@ extern phys_addr_t kernstart_addr;
 #endif
 
 #ifdef CONFIG_FLATMEM
-#define ARCH_PFN_OFFSET		(MEMORY_START >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
 #define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
 #endif
 
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index 9e2cb2019161..f706164b0bd0 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -81,7 +81,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
 static inline void qe_ic_cascade_low_ipic(unsigned int irq,
 					  struct irq_desc *desc)
 {
-	struct qe_ic *qe_ic = get_irq_desc_data(desc);
+	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
 
 	if (cascade_irq != NO_IRQ)
@@ -91,7 +91,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
 static inline void qe_ic_cascade_high_ipic(unsigned int irq,
 					   struct irq_desc *desc)
 {
-	struct qe_ic *qe_ic = get_irq_desc_data(desc);
+	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
 
 	if (cascade_irq != NO_IRQ)
@@ -101,9 +101,9 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
 static inline void qe_ic_cascade_low_mpic(unsigned int irq,
 					  struct irq_desc *desc)
 {
-	struct qe_ic *qe_ic = get_irq_desc_data(desc);
+	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-	struct irq_chip *chip = get_irq_desc_chip(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	if (cascade_irq != NO_IRQ)
 		generic_handle_irq(cascade_irq);
@@ -114,9 +114,9 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq,
 static inline void qe_ic_cascade_high_mpic(unsigned int irq,
 					   struct irq_desc *desc)
 {
-	struct qe_ic *qe_ic = get_irq_desc_data(desc);
+	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-	struct irq_chip *chip = get_irq_desc_chip(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	if (cascade_irq != NO_IRQ)
 		generic_handle_irq(cascade_irq);
@@ -127,9 +127,9 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq,
 static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
 					    struct irq_desc *desc)
 {
-	struct qe_ic *qe_ic = get_irq_desc_data(desc);
+	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
 	unsigned int cascade_irq;
-	struct irq_chip *chip = get_irq_desc_chip(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	cascade_irq = qe_ic_get_high_irq(qe_ic);
 	if (cascade_irq == NO_IRQ)
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 86ad8128963a..3b1a9b707362 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -110,7 +110,7 @@
 #define SPRN_MAS2	0x272	/* MMU Assist Register 2 */
 #define SPRN_MAS3	0x273	/* MMU Assist Register 3 */
 #define SPRN_MAS4	0x274	/* MMU Assist Register 4 */
-#define SPRN_MAS5	0x275	/* MMU Assist Register 5 */
+#define SPRN_MAS5	0x153	/* MMU Assist Register 5 */
 #define SPRN_MAS6	0x276	/* MMU Assist Register 6 */
 #define SPRN_PID1	0x279	/* Process ID Register 1 */
 #define SPRN_PID2	0x27A	/* Process ID Register 2 */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 66e237bbe15f..a902a0d3ae0d 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -36,15 +36,16 @@ extern void cpu_die(void);
 
 extern void smp_send_debugger_break(int cpu);
 extern void smp_message_recv(int);
+extern void start_secondary_resume(void);
 
 DECLARE_PER_CPU(unsigned int, cpu_pvr);
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(const struct cpumask *map);
+extern void migrate_irqs(void);
 int generic_cpu_disable(void);
-int generic_cpu_enable(unsigned int cpu);
 void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
+void generic_set_cpu_dead(unsigned int cpu);
 #endif
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index aa0f1ebb4aaf..60f64b132bd4 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -348,3 +348,7 @@ COMPAT_SYS_SPU(sendmsg)
 COMPAT_SYS_SPU(recvmsg)
 COMPAT_SYS_SPU(recvmmsg)
 SYSCALL_SPU(accept4)
+SYSCALL_SPU(name_to_handle_at)
+COMPAT_SYS_SPU(open_by_handle_at)
+COMPAT_SYS_SPU(clock_adjtime)
+SYSCALL_SPU(syncfs)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6151937657f6..3c215648ce6d 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -367,10 +367,14 @@
 #define __NR_recvmsg		342
 #define __NR_recvmmsg		343
 #define __NR_accept4		344
+#define __NR_name_to_handle_at	345
+#define __NR_open_by_handle_at	346
+#define __NR_clock_adjtime	347
+#define __NR_syncfs		348
 
 #ifdef __KERNEL__
 
-#define __NR_syscalls		345
+#define __NR_syscalls		349
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index cf02cad62d9a..d238c082c3c5 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -179,3 +179,21 @@ static int __init dma_init(void)
 	return 0;
 }
 fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 8a817995b4cd..c532cb2c927a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -977,20 +977,6 @@ _GLOBAL(do_stab_bolted)
 	rfid
 	b	.	/* prevent speculative execution */
 
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on).  The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
-	. = STAB0_OFFSET	/* 0x6000 */
-	.globl initial_stab
-initial_stab:
-	.space 4096
-
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Data area reserved for FWNMI option.
@@ -1027,3 +1013,17 @@ xLparMap:
 #ifdef CONFIG_PPC_PSERIES
 	. = 0x8000
 #endif /* CONFIG_PPC_PSERIES */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on).  The address is given to the hv
+ * as a page number (see xLparMap above), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+	. = STAB0_OFFSET	/* 0x8000 */
+	.globl initial_stab
+initial_stab:
+	.space 4096
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29a56f4..c5c24beb8387 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -890,6 +890,15 @@ __secondary_start:
 	mtspr	SPRN_SRR1,r4
 	SYNC
 	RFI
+
+_GLOBAL(start_secondary_resume)
+	/* Reset stack */
+	rlwinm	r1,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r3,0
+	std	r3,0(r1)	/* Zero the stack frame pointer	*/
+	bl	start_secondary
+	b	.
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 782f23df7c85..271140b38b6f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start)
 	add	r13,r13,r4		/* for this processor.		*/
 	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/
 
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
+
 	/* Create a temp kernel stack for use before relocation is on.	*/
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 5328709eeedc..ba3195478600 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	isync
 	b	1b
 
-_GLOBAL(power4_cpu_offline_powersave)
-	/* Go to NAP now */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1			/* hard-disable interrupts */
-	li	r0,1
-	li	r6,0
-	stb	r0,PACAHARDIRQEN(r13)	/* we'll hard-enable shortly */
-	stb	r6,PACASOFTIRQEN(r13)	/* soft-disable irqs */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsrd	r7
-	isync
-	blr
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 63625e0650b5..f621b7d2d869 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -246,12 +246,13 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
 	cpumask_var_t mask;
+	const struct cpumask *map = cpu_online_mask;
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 981360509172..cbdbb14be4b0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
 #define DBG(fmt...)
 #endif
 
+
+/* Store all idle threads, this can be reused instead of creating
+ * a new thread. Also avoids complicated thread destroy functionality
+ * for idle threads.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
+#endif
+
 struct thread_info *secondary_ti;
 
 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id)
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
-
-	/* create a process for the processor */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
-	paca[cpu].__current = p;
-	paca[cpu].kstack = (unsigned long) task_thread_info(p)
-		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
-	current_set[cpu] = task_thread_info(p);
-	task_thread_info(p)->cpu = cpu;
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int cpu;
@@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
-
-	for_each_possible_cpu(cpu)
-		if (cpu != boot_cpuid)
-			smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
@@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 int generic_cpu_disable(void)
 {
@@ -317,30 +315,8 @@ int generic_cpu_disable(void)
 	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
-	fixup_irqs(cpu_online_mask);
-#endif
-	return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
-	/* Do the normal bootup if we haven't
-	 * already bootstrapped. */
-	if (system_state != SYSTEM_RUNNING)
-		return -ENOSYS;
-
-	/* get the target out of it's holding state */
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-	smp_wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-
-#ifdef CONFIG_PPC64
-	fixup_irqs(cpu_online_mask);
-	/* counter the irq disable in fixup_irqs */
-	local_irq_enable();
 #endif
+	migrate_irqs();
 	return 0;
 }
 
@@ -362,37 +338,89 @@ void generic_mach_cpu_die(void)
 	unsigned int cpu;
 
 	local_irq_disable();
+	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	set_cpu_online(cpu, true);
-	local_irq_enable();
+}
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
 }
 #endif
 
-static int __devinit cpu_enable(unsigned int cpu)
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
 {
-	if (smp_ops && smp_ops->cpu_enable)
-		return smp_ops->cpu_enable(cpu);
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+	struct thread_info *ti;
+	struct create_idle c_idle = {
+		.cpu	= cpu,
+		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+	};
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+
+	/* We can't use kernel_thread since we must avoid to
+	 * reschedule the child. We use a workqueue because
+	 * we want to fork from a kernel thread, not whatever
+	 * userspace process happens to be trying to online us.
+	 */
+	if (!c_idle.idle) {
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+	} else
+		init_idle(c_idle.idle, cpu);
+	if (IS_ERR(c_idle.idle)) {
+		pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+		return PTR_ERR(c_idle.idle);
+	}
+	ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+	paca[cpu].__current = c_idle.idle;
+	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+	ti->cpu = cpu;
+	current_set[cpu] = ti;
 
-	return -ENOSYS;
+	return 0;
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	int c;
+	int rc, c;
 
 	secondary_ti = current_set[cpu];
-	if (!cpu_enable(cpu))
-		return 0;
 
 	if (smp_ops == NULL ||
 	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
 		return -EINVAL;
 
+	/* Make sure we have an idle thread */
+	rc = create_idle(cpu);
+	if (rc)
+		return rc;
+
 	/* Make sure callin-map entry is 0 (can be leftover a CPU
 	 * hotplug
 	 */
@@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
 }
 
 /* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
 	struct device_node *l2_cache;
@@ -523,6 +551,10 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
+#ifdef CONFIG_PPC64
+	if (system_state == SYSTEM_RUNNING)
+		vdso_data->processorCount++;
+#endif
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused)
 	local_irq_enable();
 
 	cpu_idle();
-	return 0;
+
+	BUG();
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 	free_cpumask_var(old_mask);
 
+	if (smp_ops && smp_ops->bringup_done)
+		smp_ops->bringup_done();
+
 	dump_numa_cpu_topology();
+
 }
 
 int arch_sd_sibling_asym_packing(void)
@@ -660,5 +697,9 @@ void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
 		ppc_md.cpu_die();
+
+	/* If we return, we re-enter start_secondary */
+	start_secondary_resume();
 }
+
 #endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 09d31dbf43f9..375480c56eb9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -356,7 +356,7 @@ void account_system_vtime(struct task_struct *tsk)
 	}
 	get_paca()->user_time_scaled += user_scaled;
 
-	if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+	if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
 		account_system_time(tsk, 0, delta, sys_scaled);
 		if (stolen)
 			account_steal_time(stolen);
@@ -577,14 +577,21 @@ void timer_interrupt(struct pt_regs * regs)
 	struct clock_event_device *evt = &decrementer->event;
 	u64 now;
 
+	/* Ensure a positive value is written to the decrementer, or else
+	 * some CPUs will continue to take decrementer exceptions.
+	 */
+	set_dec(DECREMENTER_MAX);
+
+	/* Some implementations of hotplug will get timer interrupts while
+	 * offline, just ignore these
+	 */
+	if (!cpu_online(smp_processor_id()))
+		return;
+
 	trace_timer_interrupt_entry(regs);
 
 	__get_cpu_var(irq_stat).timer_irqs++;
 
-	/* Ensure a positive value is written to the decrementer, or else
-	 * some CPUs will continuue to take decrementer exceptions */
-	set_dec(DECREMENTER_MAX);
-
 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 757c0bed9a91..b42f76c4948d 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
 #endif
 }
 EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+	/* This should always be populated, so we don't test every
+	 * level. If that fails, we'll have a nice crash which
+	 * will be as good as a BUG_ON()
+	 */
+	pgd_t *pgd = pgd_offset_k(cpu_addr);
+	pud_t *pud = pud_offset(pgd, cpu_addr);
+	pmd_t *pmd = pmd_offset(pud, cpu_addr);
+	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+	if (pte_none(*ptep) || !pte_present(*ptep))
+		return 0;
+	return pte_pfn(*ptep);
+}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a19bec078703..44cfd1bef89b 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -244,7 +244,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
 		break;
 	case IIC_IRQ_TYPE_IOEXC:
 		irq_set_chip_and_handler(virq, &iic_ioexc_chip,
-					 handle_iic_irq);
+					 handle_edge_eoi_irq);
 		break;
 	default:
 		irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index f0bc08f6c1f0..20468f49aec0 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,7 +33,6 @@ extern void pmac_setup_pci_dma(void);
 extern void pmac_check_ht_link(void);
 
 extern void pmac_setup_smp(void);
-extern void pmac32_cpu_die(void);
 extern void low_cpu_die(void) __attribute__((noreturn));
 
 extern int pmac_nvram_init(void);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index d5aceb7fb125..aa45281bd296 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -650,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
 		return PCI_PROBE_NORMAL;
 	return PCI_PROBE_DEVTREE;
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* access per cpu vars from generic smp.c */
-DECLARE_PER_CPU(int, cpu_state);
-
-static void pmac64_cpu_die(void)
-{
-	/*
-	 * turn off as much as possible, we'll be
-	 * kicked out as this will only be invoked
-	 * on core99 platforms for now ...
-	 */
-
-	printk(KERN_INFO "CPU#%d offline\n", smp_processor_id());
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-	smp_wmb();
-
-	/*
-	 * during the path that leads here preemption is disabled,
-	 * reenable it now so that when coming up preempt count is
-	 * zero correctly
-	 */
-	preempt_enable();
-
-	/*
-	 * hard-disable interrupts for the non-NAP case, the NAP code
-	 * needs to re-enable interrupts (but soft-disables them)
-	 */
-	hard_irq_disable();
-
-	while (1) {
-		/* let's not take timer interrupts too often ... */
-		set_dec(0x7fffffff);
-
-		/* should always be true at this point */
-		if (cpu_has_feature(CPU_FTR_CAN_NAP))
-			power4_cpu_offline_powersave();
-		else {
-			HMT_low();
-			HMT_very_low();
-		}
-	}
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
 #endif /* CONFIG_PPC64 */
 
 define_machine(powermac) {
@@ -726,15 +681,4 @@ define_machine(powermac) {
 	.pcibios_after_init	= pmac_pcibios_after_init,
 	.phys_mem_access_prot	= pci_phys_mem_access_prot,
 #endif
-#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_PPC64
-	.cpu_die		= pmac64_cpu_die,
-#endif
-#ifdef CONFIG_PPC32
-	.cpu_die		= pmac32_cpu_die,
-#endif
-#endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
-	.cpu_die		= generic_mach_cpu_die,
-#endif
 };
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index c95215f4f8b6..a830c5e80657 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -840,92 +840,149 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 
 	/* Setup openpic */
 	mpic_setup_this_cpu();
+}
 
-	if (cpu_nr == 0) {
-#ifdef CONFIG_PPC64
-		extern void g5_phy_disable_cpu1(void);
+#ifdef CONFIG_HOTPLUG_CPU
+static int smp_core99_cpu_notify(struct notifier_block *self,
+				 unsigned long action, void *hcpu)
+{
+	int rc;
 
-		/* Close i2c bus if it was used for tb sync */
+	switch(action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		/* Open i2c bus if it was used for tb sync */
 		if (pmac_tb_clock_chip_host) {
-			pmac_i2c_close(pmac_tb_clock_chip_host);
-			pmac_tb_clock_chip_host = NULL;
+			rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
+			if (rc) {
+				pr_err("Failed to open i2c bus for time sync\n");
+				return notifier_from_errno(rc);
+			}
 		}
+		break;
+	case CPU_ONLINE:
+	case CPU_UP_CANCELED:
+		/* Close i2c bus if it was used for tb sync */
+		if (pmac_tb_clock_chip_host)
+			pmac_i2c_close(pmac_tb_clock_chip_host);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
 
-		/* If we didn't start the second CPU, we must take
-		 * it off the bus
-		 */
-		if (of_machine_is_compatible("MacRISC4") &&
-		    num_online_cpus() < 2)
-			g5_phy_disable_cpu1();
-#endif /* CONFIG_PPC64 */
+static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
+	.notifier_call	= smp_core99_cpu_notify,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static void __init smp_core99_bringup_done(void)
+{
+#ifdef CONFIG_PPC64
+	extern void g5_phy_disable_cpu1(void);
+
+	/* Close i2c bus if it was used for tb sync */
+	if (pmac_tb_clock_chip_host)
+		pmac_i2c_close(pmac_tb_clock_chip_host);
 
-		if (ppc_md.progress)
-			ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+	/* If we didn't start the second CPU, we must take
+	 * it off the bus.
+	 */
+	if (of_machine_is_compatible("MacRISC4") &&
+	    num_online_cpus() < 2) {
+		set_cpu_present(1, false);
+		g5_phy_disable_cpu1();
 	}
-}
+#endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_HOTPLUG_CPU
+	register_cpu_notifier(&smp_core99_cpu_nb);
+#endif
+	if (ppc_md.progress)
+		ppc_md.progress("smp_core99_bringup_done", 0x349);
+}
 
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
+#ifdef CONFIG_HOTPLUG_CPU
 
-int smp_core99_cpu_disable(void)
+static int smp_core99_cpu_disable(void)
 {
-	set_cpu_online(smp_processor_id(), false);
+	int rc = generic_cpu_disable();
+	if (rc)
+		return rc;
 
-	/* XXX reset cpu affinity here */
 	mpic_cpu_set_priority(0xf);
-	asm volatile("mtdec %0" : : "r" (0x7fffffff));
-	mb();
-	udelay(20);
-	asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
 	return 0;
 }
 
-static int cpu_dead[NR_CPUS];
+#ifdef CONFIG_PPC32
 
-void pmac32_cpu_die(void)
+static void pmac_cpu_die(void)
 {
+	int cpu = smp_processor_id();
+
 	local_irq_disable();
-	cpu_dead[smp_processor_id()] = 1;
+	idle_task_exit();
+	pr_debug("CPU%d offline\n", cpu);
+	generic_set_cpu_dead(cpu);
+	smp_wmb();
 	mb();
 	low_cpu_die();
 }
 
-void smp_core99_cpu_die(unsigned int cpu)
+#else /* CONFIG_PPC32 */
+
+static void pmac_cpu_die(void)
 {
-	int timeout;
+	int cpu = smp_processor_id();
 
-	timeout = 1000;
-	while (!cpu_dead[cpu]) {
-		if (--timeout == 0) {
-			printk("CPU %u refused to die!\n", cpu);
-			break;
-		}
-		msleep(1);
+	local_irq_disable();
+	idle_task_exit();
+
+	/*
+	 * turn off as much as possible, we'll be
+	 * kicked out as this will only be invoked
+	 * on core99 platforms for now ...
+	 */
+
+	printk(KERN_INFO "CPU#%d offline\n", cpu);
+	generic_set_cpu_dead(cpu);
+	smp_wmb();
+
+	/*
+	 * Re-enable interrupts. The NAP code needs to enable them
+	 * anyways, do it now so we deal with the case where one already
+	 * happened while soft-disabled.
+	 * We shouldn't get any external interrupts, only decrementer, and the
+	 * decrementer handler is safe for use on offline CPUs
+	 */
+	local_irq_enable();
+
+	while (1) {
+		/* let's not take timer interrupts too often ... */
+		set_dec(0x7fffffff);
+
+		/* Enter NAP mode */
+		power4_idle();
 	}
-	cpu_dead[cpu] = 0;
 }
 
-#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */
+#endif /* else CONFIG_PPC32 */
+#endif /* CONFIG_HOTPLUG_CPU */
 
 /* Core99 Macs (dual G4s and G5s) */
 struct smp_ops_t core99_smp_ops = {
 	.message_pass	= smp_mpic_message_pass,
 	.probe		= smp_core99_probe,
+	.bringup_done	= smp_core99_bringup_done,
 	.kick_cpu	= smp_core99_kick_cpu,
 	.setup_cpu	= smp_core99_setup_cpu,
 	.give_timebase	= smp_core99_give_timebase,
 	.take_timebase	= smp_core99_take_timebase,
 #if defined(CONFIG_HOTPLUG_CPU)
-# if defined(CONFIG_PPC32)
 	.cpu_disable	= smp_core99_cpu_disable,
-	.cpu_die	= smp_core99_cpu_die,
-# endif
-# if defined(CONFIG_PPC64)
-	.cpu_disable	= generic_cpu_disable,
 	.cpu_die	= generic_cpu_die,
-	/* intentionally do *NOT* assign cpu_enable,
-	 * the generic code will use kick_cpu then! */
-# endif
 #endif
 };
 
@@ -957,5 +1014,10 @@ void __init pmac_setup_smp(void)
 		smp_ops = &psurge_smp_ops;
 	}
 #endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_HOTPLUG_CPU
+	ppc_md.cpu_die = pmac_cpu_die;
+#endif
 }
 
+
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 419707b07248..00cc3a094885 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -480,8 +480,32 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, | |||
480 | const char *new_msgs, unsigned long new_len) | 480 | const char *new_msgs, unsigned long new_len) |
481 | { | 481 | { |
482 | static unsigned int oops_count = 0; | 482 | static unsigned int oops_count = 0; |
483 | static bool panicking = false; | ||
483 | size_t text_len; | 484 | size_t text_len; |
484 | 485 | ||
486 | switch (reason) { | ||
487 | case KMSG_DUMP_RESTART: | ||
488 | case KMSG_DUMP_HALT: | ||
489 | case KMSG_DUMP_POWEROFF: | ||
490 | /* These are almost always orderly shutdowns. */ | ||
491 | return; | ||
492 | case KMSG_DUMP_OOPS: | ||
493 | case KMSG_DUMP_KEXEC: | ||
494 | break; | ||
495 | case KMSG_DUMP_PANIC: | ||
496 | panicking = true; | ||
497 | break; | ||
498 | case KMSG_DUMP_EMERG: | ||
499 | if (panicking) | ||
500 | /* Panic report already captured. */ | ||
501 | return; | ||
502 | break; | ||
503 | default: | ||
504 | pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", | ||
505 | __FUNCTION__, (int) reason); | ||
506 | return; | ||
507 | } | ||
508 | |||
485 | if (clobbering_unread_rtas_event()) | 509 | if (clobbering_unread_rtas_event()) |
486 | return; | 510 | return; |
487 | 511 | ||
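
The new switch makes oops_to_nvram() selective about which kmsg-dump reasons are worth writing to NVRAM: orderly restart/halt/poweroff events are skipped, OOPS and KEXEC dumps are captured, and the static panicking flag keeps the follow-up EMERG dump from overwriting a panic report that was already saved. A minimal stand-alone dumper using the same filter shape is sketched below; the demo_* names are hypothetical, while the KMSG_DUMP_* reasons, the six-argument callback signature visible in the hunk, and kmsg_dump_register() are taken from the source.

#include <linux/kmsg_dump.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical dumper illustrating the same reason filtering. */
static void demo_dump(struct kmsg_dumper *dumper,
		      enum kmsg_dump_reason reason,
		      const char *old_msgs, unsigned long old_len,
		      const char *new_msgs, unsigned long new_len)
{
	static bool panicking;

	switch (reason) {
	case KMSG_DUMP_RESTART:
	case KMSG_DUMP_HALT:
	case KMSG_DUMP_POWEROFF:
		return;			/* orderly shutdown, nothing to save */
	case KMSG_DUMP_OOPS:
	case KMSG_DUMP_KEXEC:
		break;
	case KMSG_DUMP_PANIC:
		panicking = true;	/* remember the panic text was saved */
		break;
	case KMSG_DUMP_EMERG:
		if (panicking)
			return;		/* panic report already captured */
		break;
	default:
		return;
	}

	pr_info("demo: capturing %lu bytes for reason %d\n",
		new_len, (int)reason);
	/* ... write new_msgs/new_len to persistent storage here ... */
}

static struct kmsg_dumper demo_dumper = {
	.dump = demo_dump,
};

static int __init demo_init(void)
{
	return kmsg_dump_register(&demo_dumper);
}
module_init(demo_init);
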
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 75a6f480d931..08672d9136ab 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h | |||
@@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu) | |||
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | extern enum cpu_state_vals get_preferred_offline_state(int cpu); | 36 | extern enum cpu_state_vals get_preferred_offline_state(int cpu); |
37 | extern int start_secondary(void); | ||
38 | extern void start_secondary_resume(void); | ||
39 | #endif | 37 | #endif |
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 0317cce877c6..d6479f9738f0 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
@@ -64,8 +64,8 @@ int smp_query_cpu_stopped(unsigned int pcpu) | |||
64 | int qcss_tok = rtas_token("query-cpu-stopped-state"); | 64 | int qcss_tok = rtas_token("query-cpu-stopped-state"); |
65 | 65 | ||
66 | if (qcss_tok == RTAS_UNKNOWN_SERVICE) { | 66 | if (qcss_tok == RTAS_UNKNOWN_SERVICE) { |
67 | printk(KERN_INFO "Firmware doesn't support " | 67 | printk_once(KERN_INFO |
68 | "query-cpu-stopped-state\n"); | 68 | "Firmware doesn't support query-cpu-stopped-state\n"); |
69 | return QCSS_HARDWARE_ERROR; | 69 | return QCSS_HARDWARE_ERROR; |
70 | } | 70 | } |
71 | 71 | ||
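
Switching to printk_once() keeps the missing-RTAS-token message from repeating every time smp_query_cpu_stopped() runs on firmware without query-cpu-stopped-state. A rough open-coded equivalent of what the macro does (illustrative only; demo_warn_no_qcss() is a made-up name and the real macro lives in the printk headers):

static void demo_warn_no_qcss(void)
{
	static bool printed;

	if (!printed) {
		printed = true;
		printk(KERN_INFO
		       "Firmware doesn't support query-cpu-stopped-state\n");
	}
}
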
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 6c1e638f0ce9..ec8fe22047b7 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -204,33 +204,33 @@ static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, | |||
204 | 204 | ||
205 | static void xics_unmask_irq(struct irq_data *d) | 205 | static void xics_unmask_irq(struct irq_data *d) |
206 | { | 206 | { |
207 | unsigned int irq; | 207 | unsigned int hwirq; |
208 | int call_status; | 208 | int call_status; |
209 | int server; | 209 | int server; |
210 | 210 | ||
211 | pr_devel("xics: unmask virq %d\n", d->irq); | 211 | pr_devel("xics: unmask virq %d\n", d->irq); |
212 | 212 | ||
213 | irq = (unsigned int)irq_map[d->irq].hwirq; | 213 | hwirq = (unsigned int)irq_map[d->irq].hwirq; |
214 | pr_devel(" -> map to hwirq 0x%x\n", irq); | 214 | pr_devel(" -> map to hwirq 0x%x\n", hwirq); |
215 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | 215 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) |
216 | return; | 216 | return; |
217 | 217 | ||
218 | server = get_irq_server(d->irq, d->affinity, 0); | 218 | server = get_irq_server(d->irq, d->affinity, 0); |
219 | 219 | ||
220 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, | 220 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server, |
221 | DEFAULT_PRIORITY); | 221 | DEFAULT_PRIORITY); |
222 | if (call_status != 0) { | 222 | if (call_status != 0) { |
223 | printk(KERN_ERR | 223 | printk(KERN_ERR |
224 | "%s: ibm_set_xive irq %u server %x returned %d\n", | 224 | "%s: ibm_set_xive irq %u server %x returned %d\n", |
225 | __func__, irq, server, call_status); | 225 | __func__, hwirq, server, call_status); |
226 | return; | 226 | return; |
227 | } | 227 | } |
228 | 228 | ||
229 | /* Now unmask the interrupt (often a no-op) */ | 229 | /* Now unmask the interrupt (often a no-op) */ |
230 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); | 230 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq); |
231 | if (call_status != 0) { | 231 | if (call_status != 0) { |
232 | printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", | 232 | printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", |
233 | __func__, irq, call_status); | 233 | __func__, hwirq, call_status); |
234 | return; | 234 | return; |
235 | } | 235 | } |
236 | } | 236 | } |
@@ -250,46 +250,46 @@ static unsigned int xics_startup(struct irq_data *d) | |||
250 | return 0; | 250 | return 0; |
251 | } | 251 | } |
252 | 252 | ||
253 | static void xics_mask_real_irq(struct irq_data *d) | 253 | static void xics_mask_real_irq(unsigned int hwirq) |
254 | { | 254 | { |
255 | int call_status; | 255 | int call_status; |
256 | 256 | ||
257 | if (d->irq == XICS_IPI) | 257 | if (hwirq == XICS_IPI) |
258 | return; | 258 | return; |
259 | 259 | ||
260 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, d->irq); | 260 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq); |
261 | if (call_status != 0) { | 261 | if (call_status != 0) { |
262 | printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", | 262 | printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", |
263 | __func__, d->irq, call_status); | 263 | __func__, hwirq, call_status); |
264 | return; | 264 | return; |
265 | } | 265 | } |
266 | 266 | ||
267 | /* Have to set XIVE to 0xff to be able to remove a slot */ | 267 | /* Have to set XIVE to 0xff to be able to remove a slot */ |
268 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, d->irq, | 268 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, |
269 | default_server, 0xff); | 269 | default_server, 0xff); |
270 | if (call_status != 0) { | 270 | if (call_status != 0) { |
271 | printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", | 271 | printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", |
272 | __func__, d->irq, call_status); | 272 | __func__, hwirq, call_status); |
273 | return; | 273 | return; |
274 | } | 274 | } |
275 | } | 275 | } |
276 | 276 | ||
277 | static void xics_mask_irq(struct irq_data *d) | 277 | static void xics_mask_irq(struct irq_data *d) |
278 | { | 278 | { |
279 | unsigned int irq; | 279 | unsigned int hwirq; |
280 | 280 | ||
281 | pr_devel("xics: mask virq %d\n", d->irq); | 281 | pr_devel("xics: mask virq %d\n", d->irq); |
282 | 282 | ||
283 | irq = (unsigned int)irq_map[d->irq].hwirq; | 283 | hwirq = (unsigned int)irq_map[d->irq].hwirq; |
284 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | 284 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) |
285 | return; | 285 | return; |
286 | xics_mask_real_irq(d); | 286 | xics_mask_real_irq(hwirq); |
287 | } | 287 | } |
288 | 288 | ||
289 | static void xics_mask_unknown_vec(unsigned int vec) | 289 | static void xics_mask_unknown_vec(unsigned int vec) |
290 | { | 290 | { |
291 | printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); | 291 | printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); |
292 | xics_mask_real_irq(irq_get_irq_data(vec)); | 292 | xics_mask_real_irq(vec); |
293 | } | 293 | } |
294 | 294 | ||
295 | static inline unsigned int xics_xirr_vector(unsigned int xirr) | 295 | static inline unsigned int xics_xirr_vector(unsigned int xirr) |
@@ -373,37 +373,37 @@ static unsigned char pop_cppr(void) | |||
373 | 373 | ||
374 | static void xics_eoi_direct(struct irq_data *d) | 374 | static void xics_eoi_direct(struct irq_data *d) |
375 | { | 375 | { |
376 | unsigned int irq = (unsigned int)irq_map[d->irq].hwirq; | 376 | unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; |
377 | 377 | ||
378 | iosync(); | 378 | iosync(); |
379 | direct_xirr_info_set((pop_cppr() << 24) | irq); | 379 | direct_xirr_info_set((pop_cppr() << 24) | hwirq); |
380 | } | 380 | } |
381 | 381 | ||
382 | static void xics_eoi_lpar(struct irq_data *d) | 382 | static void xics_eoi_lpar(struct irq_data *d) |
383 | { | 383 | { |
384 | unsigned int irq = (unsigned int)irq_map[d->irq].hwirq; | 384 | unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; |
385 | 385 | ||
386 | iosync(); | 386 | iosync(); |
387 | lpar_xirr_info_set((pop_cppr() << 24) | irq); | 387 | lpar_xirr_info_set((pop_cppr() << 24) | hwirq); |
388 | } | 388 | } |
389 | 389 | ||
390 | static int | 390 | static int |
391 | xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) | 391 | xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) |
392 | { | 392 | { |
393 | unsigned int irq; | 393 | unsigned int hwirq; |
394 | int status; | 394 | int status; |
395 | int xics_status[2]; | 395 | int xics_status[2]; |
396 | int irq_server; | 396 | int irq_server; |
397 | 397 | ||
398 | irq = (unsigned int)irq_map[d->irq].hwirq; | 398 | hwirq = (unsigned int)irq_map[d->irq].hwirq; |
399 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | 399 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) |
400 | return -1; | 400 | return -1; |
401 | 401 | ||
402 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | 402 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); |
403 | 403 | ||
404 | if (status) { | 404 | if (status) { |
405 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | 405 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", |
406 | __func__, irq, status); | 406 | __func__, hwirq, status); |
407 | return -1; | 407 | return -1; |
408 | } | 408 | } |
409 | 409 | ||
@@ -418,11 +418,11 @@ xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) | |||
418 | } | 418 | } |
419 | 419 | ||
420 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | 420 | status = rtas_call(ibm_set_xive, 3, 1, NULL, |
421 | irq, irq_server, xics_status[1]); | 421 | hwirq, irq_server, xics_status[1]); |
422 | 422 | ||
423 | if (status) { | 423 | if (status) { |
424 | printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", | 424 | printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", |
425 | __func__, irq, status); | 425 | __func__, hwirq, status); |
426 | return -1; | 426 | return -1; |
427 | } | 427 | } |
428 | 428 | ||
@@ -874,7 +874,7 @@ void xics_kexec_teardown_cpu(int secondary) | |||
874 | void xics_migrate_irqs_away(void) | 874 | void xics_migrate_irqs_away(void) |
875 | { | 875 | { |
876 | int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); | 876 | int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); |
877 | unsigned int irq, virq; | 877 | int virq; |
878 | 878 | ||
879 | /* If we used to be the default server, move to the new "boot_cpuid" */ | 879 | /* If we used to be the default server, move to the new "boot_cpuid" */ |
880 | if (hw_cpu == default_server) | 880 | if (hw_cpu == default_server) |
@@ -892,6 +892,7 @@ void xics_migrate_irqs_away(void) | |||
892 | for_each_irq(virq) { | 892 | for_each_irq(virq) { |
893 | struct irq_desc *desc; | 893 | struct irq_desc *desc; |
894 | struct irq_chip *chip; | 894 | struct irq_chip *chip; |
895 | unsigned int hwirq; | ||
895 | int xics_status[2]; | 896 | int xics_status[2]; |
896 | int status; | 897 | int status; |
897 | unsigned long flags; | 898 | unsigned long flags; |
@@ -901,9 +902,9 @@ void xics_migrate_irqs_away(void) | |||
901 | continue; | 902 | continue; |
902 | if (irq_map[virq].host != xics_host) | 903 | if (irq_map[virq].host != xics_host) |
903 | continue; | 904 | continue; |
904 | irq = (unsigned int)irq_map[virq].hwirq; | 905 | hwirq = (unsigned int)irq_map[virq].hwirq; |
905 | /* We need to get IPIs still. */ | 906 | /* We need to get IPIs still. */ |
906 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | 907 | if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) |
907 | continue; | 908 | continue; |
908 | 909 | ||
909 | desc = irq_to_desc(virq); | 910 | desc = irq_to_desc(virq); |
@@ -918,10 +919,10 @@ void xics_migrate_irqs_away(void) | |||
918 | 919 | ||
919 | raw_spin_lock_irqsave(&desc->lock, flags); | 920 | raw_spin_lock_irqsave(&desc->lock, flags); |
920 | 921 | ||
921 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | 922 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); |
922 | if (status) { | 923 | if (status) { |
923 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | 924 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", |
924 | __func__, irq, status); | 925 | __func__, hwirq, status); |
925 | goto unlock; | 926 | goto unlock; |
926 | } | 927 | } |
927 | 928 | ||
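
Throughout the XICS hunks the local formerly called irq is renamed hwirq, making explicit that the value looked up through irq_map[] -- and then handed to RTAS or OR'd into the XIRR -- is the hardware interrupt source number, not the Linux virtual number in d->irq. xics_mask_real_irq() now takes that hardware number directly, which also lets xics_mask_unknown_vec() pass the raw vector straight through instead of round-tripping it via irq_get_irq_data(). The translation all these callbacks share boils down to the sketch below; xics_hwirq_of() and demo_unmask() are made-up names, everything else appears in the hunks above.

static inline unsigned int xics_hwirq_of(struct irq_data *d)
{
	/* virq -> hardware source number, as done in each callback above */
	return (unsigned int)irq_map[d->irq].hwirq;
}

static void demo_unmask(struct irq_data *d)
{
	unsigned int hwirq = xics_hwirq_of(d);

	if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
		return;

	/* RTAS only ever sees the hardware number, never the Linux virq */
	if (rtas_call(ibm_int_on, 1, 1, NULL, hwirq) != 0)
		printk(KERN_ERR "demo: ibm_int_on failed for hwirq %u\n",
		       hwirq);
}
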
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index f550e23632f8..a88800ff4d01 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c | |||
@@ -80,7 +80,7 @@ static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) | |||
80 | if ((hw & 1) == 0) { | 80 | if ((hw & 1) == 0) { |
81 | siel |= (0x80000000 >> hw); | 81 | siel |= (0x80000000 >> hw); |
82 | out_be32(&siu_reg->sc_siel, siel); | 82 | out_be32(&siu_reg->sc_siel, siel); |
83 | __irq_set_handler_locked(irq, handle_edge_irq); | 83 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
84 | } | 84 | } |
85 | } | 85 | } |
86 | return 0; | 86 | return 0; |
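
The one-line mpc8xx fix makes the irq_data-based callback pass its own Linux interrupt number, d->irq, to __irq_set_handler_locked() instead of a leftover irq identifier from before the irq_chip conversion. A hedged sketch of the corrected shape of such a callback; demo_set_irq_type() is an illustrative name, while the calls and constants are the ones used in the hunk.

static int demo_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_FALLING) {
		/* ... program the controller for edge sensitivity ... */
		__irq_set_handler_locked(d->irq, handle_edge_irq);
	}
	return 0;
}
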