author    Linus Torvalds <torvalds@linux-foundation.org>    2010-10-22 00:19:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-10-22 00:19:54 -0400
commit    d4429f608abde89e8bc1e24b43cd503feb95c496 (patch)
tree      4c11afa193593a5e3949391bf35022b4f87ba375 /arch/powerpc/kernel
parent    e10117d36ef758da0690c95ecffc09d5dd7da479 (diff)
parent    6a1c9dfe4186f18fed38421b35b40fb9260cbfe1 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (71 commits)
powerpc/44x: Update ppc44x_defconfig
powerpc/watchdog: Make default timeout for Book-E watchdog a Kconfig option
fsl_rio: Add comments for sRIO registers.
powerpc/fsl-booke: Add e55xx (64-bit) smp defconfig
powerpc/fsl-booke: Add p5020 DS board support
powerpc/fsl-booke64: Use TLB CAMs to cover linear mapping on FSL 64-bit chips
powerpc/fsl-booke: Add support for FSL Arch v1.0 MMU in setup_page_sizes
powerpc/fsl-booke: Add support for FSL 64-bit e5500 core
powerpc/85xx: add cache-sram support
powerpc/85xx: add ngPIXIS FPGA device tree node to the P1022DS board
powerpc: Fix compile error with paca code on ppc64e
powerpc/fsl-booke: Add p3041 DS board support
oprofile/fsl emb: Don't set MSR[PMM] until after clearing the interrupt.
powerpc/fsl-booke: Add PCI device ids for P2040/P3041/P5010/P5020 QoirQ chips
powerpc/mpc8xxx_gpio: Add support for 'qoriq-gpio' controllers
powerpc/fsl_booke: Add support to boot from core other than 0
powerpc/p1022: Add probing for individual DMA channels
powerpc/fsl_soc: Search all global-utilities nodes for rstccr
powerpc: Fix invalid page flags in create TLB CAM path for PTE_64BIT
powerpc/mpc83xx: Support for MPC8308 P1M board
...
Fix up conflict with the generic irq_work changes in arch/powerpc/kernel/time.c
Diffstat (limited to 'arch/powerpc/kernel')
30 files changed, 426 insertions, 233 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1dda7012914..4ed076a4db2 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -55,7 +55,9 @@ obj-$(CONFIG_IBMVIO) += vio.o | |||
55 | obj-$(CONFIG_IBMEBUS) += ibmebus.o | 55 | obj-$(CONFIG_IBMEBUS) += ibmebus.o |
56 | obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o | 56 | obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o |
57 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 57 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
58 | ifeq ($(CONFIG_PPC32),y) | ||
58 | obj-$(CONFIG_E500) += idle_e500.o | 59 | obj-$(CONFIG_E500) += idle_e500.o |
60 | endif | ||
59 | obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o | 61 | obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o |
60 | obj-$(CONFIG_TAU) += tau_6xx.o | 62 | obj-$(CONFIG_TAU) += tau_6xx.o |
61 | obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o | 63 | obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o |
@@ -67,7 +69,7 @@ endif | |||
67 | obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o | 69 | obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o |
68 | obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o | 70 | obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o |
69 | obj-$(CONFIG_44x) += cpu_setup_44x.o | 71 | obj-$(CONFIG_44x) += cpu_setup_44x.o |
70 | obj-$(CONFIG_FSL_BOOKE) += cpu_setup_fsl_booke.o dbell.o | 72 | obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o |
71 | obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o | 73 | obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o |
72 | 74 | ||
73 | extra-y := head_$(CONFIG_WORD_SIZE).o | 75 | extra-y := head_$(CONFIG_WORD_SIZE).o |
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index b876e989220..8184ee97e48 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -889,7 +889,7 @@ int fix_alignment(struct pt_regs *regs) | |||
889 | #ifdef CONFIG_PPC_FPU | 889 | #ifdef CONFIG_PPC_FPU |
890 | preempt_disable(); | 890 | preempt_disable(); |
891 | enable_kernel_fp(); | 891 | enable_kernel_fp(); |
892 | cvt_df(&data.dd, (float *)&data.v[4], ¤t->thread); | 892 | cvt_df(&data.dd, (float *)&data.v[4]); |
893 | preempt_enable(); | 893 | preempt_enable(); |
894 | #else | 894 | #else |
895 | return 0; | 895 | return 0; |
@@ -933,7 +933,7 @@ int fix_alignment(struct pt_regs *regs) | |||
933 | #ifdef CONFIG_PPC_FPU | 933 | #ifdef CONFIG_PPC_FPU |
934 | preempt_disable(); | 934 | preempt_disable(); |
935 | enable_kernel_fp(); | 935 | enable_kernel_fp(); |
936 | cvt_fd((float *)&data.v[4], &data.dd, ¤t->thread); | 936 | cvt_fd((float *)&data.v[4], &data.dd); |
937 | preempt_enable(); | 937 | preempt_enable(); |
938 | #else | 938 | #else |
939 | return 0; | 939 | return 0; |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1c0607ddccc..c3e01945ad4 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -61,7 +61,7 @@ | |||
61 | #endif | 61 | #endif |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #if defined(CONFIG_FSL_BOOKE) | 64 | #if defined(CONFIG_PPC_FSL_BOOK3E) |
65 | #include "../mm/mmu_decl.h" | 65 | #include "../mm/mmu_decl.h" |
66 | #endif | 66 | #endif |
67 | 67 | ||
@@ -181,17 +181,19 @@ int main(void) | |||
181 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); | 181 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); |
182 | DEFINE(SLBSHADOW_STACKESID, | 182 | DEFINE(SLBSHADOW_STACKESID, |
183 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); | 183 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); |
184 | DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); | ||
184 | DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); | 185 | DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); |
185 | DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); | 186 | DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); |
186 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); | 187 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); |
187 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); | 188 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); |
188 | DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); | 189 | DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); |
190 | DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); | ||
189 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 191 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
190 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); | 192 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); |
191 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | 193 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); |
192 | DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); | 194 | DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); |
193 | DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); | 195 | DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime)); |
194 | DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr)); | 196 | DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user)); |
195 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); | 197 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); |
196 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); | 198 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); |
197 | DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); | 199 | DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); |
@@ -468,7 +470,7 @@ int main(void) | |||
468 | DEFINE(PGD_T_LOG2, PGD_T_LOG2); | 470 | DEFINE(PGD_T_LOG2, PGD_T_LOG2); |
469 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); | 471 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); |
470 | #endif | 472 | #endif |
471 | #ifdef CONFIG_FSL_BOOKE | 473 | #ifdef CONFIG_PPC_FSL_BOOK3E |
472 | DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); | 474 | DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); |
473 | DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); | 475 | DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); |
474 | DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); | 476 | DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); |
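Note: the asm-offsets entries above exist so that assembly such as entry_64.S can refer to C structure offsets by name; each DEFINE() ends up as a constant in a generated header. A minimal user-space sketch of the idea, assuming a stand-in paca layout (the real kernel emits the values through the compiler's asm output and a build-time script into asm-offsets.h rather than printing them):

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct paca_struct; only the fields touched in this diff. */
struct paca_example {
	unsigned long starttime;
	unsigned long starttime_user;
	unsigned long user_time;
	unsigned long system_time;
};

/* The kernel's DEFINE() emits "->NAME value" into the asm output, which the
 * build turns into "#define NAME value"; here we simply print the same data. */
#define DEFINE(sym, val) printf("#define %-22s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(PACA_STARTTIME, offsetof(struct paca_example, starttime));
	DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_example, starttime_user));
	DEFINE(PACA_USER_TIME, offsetof(struct paca_example, user_time));
	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_example, system_time));
	return 0;
}
```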
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
index 7d606f89a83..e32b4a9a2c2 100644
--- a/arch/powerpc/kernel/cpu_setup_44x.S
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -35,6 +35,7 @@ _GLOBAL(__setup_cpu_440grx) | |||
35 | _GLOBAL(__setup_cpu_460ex) | 35 | _GLOBAL(__setup_cpu_460ex) |
36 | _GLOBAL(__setup_cpu_460gt) | 36 | _GLOBAL(__setup_cpu_460gt) |
37 | _GLOBAL(__setup_cpu_460sx) | 37 | _GLOBAL(__setup_cpu_460sx) |
38 | _GLOBAL(__setup_cpu_apm821xx) | ||
38 | mflr r4 | 39 | mflr r4 |
39 | bl __init_fpu_44x | 40 | bl __init_fpu_44x |
40 | bl __fixup_440A_mcheck | 41 | bl __fixup_440A_mcheck |
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 0adb50ad803..894e64fa481 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -51,6 +51,7 @@ _GLOBAL(__e500_dcache_setup) | |||
51 | isync | 51 | isync |
52 | blr | 52 | blr |
53 | 53 | ||
54 | #ifdef CONFIG_PPC32 | ||
54 | _GLOBAL(__setup_cpu_e200) | 55 | _GLOBAL(__setup_cpu_e200) |
55 | /* enable dedicated debug exception handling resources (Debug APU) */ | 56 | /* enable dedicated debug exception handling resources (Debug APU) */ |
56 | mfspr r3,SPRN_HID0 | 57 | mfspr r3,SPRN_HID0 |
@@ -72,3 +73,17 @@ _GLOBAL(__setup_cpu_e500mc) | |||
72 | bl __setup_e500mc_ivors | 73 | bl __setup_e500mc_ivors |
73 | mtlr r4 | 74 | mtlr r4 |
74 | blr | 75 | blr |
76 | #endif | ||
77 | /* Right now, restore and setup are the same thing */ | ||
78 | _GLOBAL(__restore_cpu_e5500) | ||
79 | _GLOBAL(__setup_cpu_e5500) | ||
80 | mflr r4 | ||
81 | bl __e500_icache_setup | ||
82 | bl __e500_dcache_setup | ||
83 | #ifdef CONFIG_PPC_BOOK3E_64 | ||
84 | bl .__setup_base_ivors | ||
85 | #else | ||
86 | bl __setup_e500mc_ivors | ||
87 | #endif | ||
88 | mtlr r4 | ||
89 | blr | ||
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 1f9123f412e..96a908f1cd8 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -48,6 +48,7 @@ extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec); | |||
48 | extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); | 48 | extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); |
49 | extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); | 49 | extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); |
50 | extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); | 50 | extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); |
51 | extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); | ||
51 | extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); | 52 | extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); |
52 | extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); | 53 | extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); |
53 | extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); | 54 | extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); |
@@ -66,6 +67,10 @@ extern void __restore_cpu_ppc970(void); | |||
66 | extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); | 67 | extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); |
67 | extern void __restore_cpu_power7(void); | 68 | extern void __restore_cpu_power7(void); |
68 | #endif /* CONFIG_PPC64 */ | 69 | #endif /* CONFIG_PPC64 */ |
70 | #if defined(CONFIG_E500) | ||
71 | extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); | ||
72 | extern void __restore_cpu_e5500(void); | ||
73 | #endif /* CONFIG_E500 */ | ||
69 | 74 | ||
70 | /* This table only contains "desktop" CPUs, it need to be filled with embedded | 75 | /* This table only contains "desktop" CPUs, it need to be filled with embedded |
71 | * ones as well... | 76 | * ones as well... |
@@ -1805,6 +1810,20 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1805 | .machine_check = machine_check_440A, | 1810 | .machine_check = machine_check_440A, |
1806 | .platform = "ppc440", | 1811 | .platform = "ppc440", |
1807 | }, | 1812 | }, |
1813 | { /* 464 in APM821xx */ | ||
1814 | .pvr_mask = 0xffffff00, | ||
1815 | .pvr_value = 0x12C41C80, | ||
1816 | .cpu_name = "APM821XX", | ||
1817 | .cpu_features = CPU_FTRS_44X, | ||
1818 | .cpu_user_features = COMMON_USER_BOOKE | | ||
1819 | PPC_FEATURE_HAS_FPU, | ||
1820 | .mmu_features = MMU_FTR_TYPE_44x, | ||
1821 | .icache_bsize = 32, | ||
1822 | .dcache_bsize = 32, | ||
1823 | .cpu_setup = __setup_cpu_apm821xx, | ||
1824 | .machine_check = machine_check_440A, | ||
1825 | .platform = "ppc440", | ||
1826 | }, | ||
1808 | { /* 476 core */ | 1827 | { /* 476 core */ |
1809 | .pvr_mask = 0xffff0000, | 1828 | .pvr_mask = 0xffff0000, |
1810 | .pvr_value = 0x11a50000, | 1829 | .pvr_value = 0x11a50000, |
@@ -1891,7 +1910,9 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1891 | .platform = "ppc5554", | 1910 | .platform = "ppc5554", |
1892 | } | 1911 | } |
1893 | #endif /* CONFIG_E200 */ | 1912 | #endif /* CONFIG_E200 */ |
1913 | #endif /* CONFIG_PPC32 */ | ||
1894 | #ifdef CONFIG_E500 | 1914 | #ifdef CONFIG_E500 |
1915 | #ifdef CONFIG_PPC32 | ||
1895 | { /* e500 */ | 1916 | { /* e500 */ |
1896 | .pvr_mask = 0xffff0000, | 1917 | .pvr_mask = 0xffff0000, |
1897 | .pvr_value = 0x80200000, | 1918 | .pvr_value = 0x80200000, |
@@ -1946,6 +1967,26 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1946 | .machine_check = machine_check_e500mc, | 1967 | .machine_check = machine_check_e500mc, |
1947 | .platform = "ppce500mc", | 1968 | .platform = "ppce500mc", |
1948 | }, | 1969 | }, |
1970 | #endif /* CONFIG_PPC32 */ | ||
1971 | { /* e5500 */ | ||
1972 | .pvr_mask = 0xffff0000, | ||
1973 | .pvr_value = 0x80240000, | ||
1974 | .cpu_name = "e5500", | ||
1975 | .cpu_features = CPU_FTRS_E500MC, | ||
1976 | .cpu_user_features = COMMON_USER_BOOKE, | ||
1977 | .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | | ||
1978 | MMU_FTR_USE_TLBILX, | ||
1979 | .icache_bsize = 64, | ||
1980 | .dcache_bsize = 64, | ||
1981 | .num_pmcs = 4, | ||
1982 | .oprofile_cpu_type = "ppc/e500mc", | ||
1983 | .oprofile_type = PPC_OPROFILE_FSL_EMB, | ||
1984 | .cpu_setup = __setup_cpu_e5500, | ||
1985 | .cpu_restore = __restore_cpu_e5500, | ||
1986 | .machine_check = machine_check_e500mc, | ||
1987 | .platform = "ppce5500", | ||
1988 | }, | ||
1989 | #ifdef CONFIG_PPC32 | ||
1949 | { /* default match */ | 1990 | { /* default match */ |
1950 | .pvr_mask = 0x00000000, | 1991 | .pvr_mask = 0x00000000, |
1951 | .pvr_value = 0x00000000, | 1992 | .pvr_value = 0x00000000, |
@@ -1960,8 +2001,8 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1960 | .machine_check = machine_check_e500, | 2001 | .machine_check = machine_check_e500, |
1961 | .platform = "powerpc", | 2002 | .platform = "powerpc", |
1962 | } | 2003 | } |
1963 | #endif /* CONFIG_E500 */ | ||
1964 | #endif /* CONFIG_PPC32 */ | 2004 | #endif /* CONFIG_PPC32 */ |
2005 | #endif /* CONFIG_E500 */ | ||
1965 | 2006 | ||
1966 | #ifdef CONFIG_PPC_BOOK3E_64 | 2007 | #ifdef CONFIG_PPC_BOOK3E_64 |
1967 | { /* This is a default entry to get going, to be replaced by | 2008 | { /* This is a default entry to get going, to be replaced by |
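Note: the new cpu_specs[] entries (APM821xx, e5500) are selected at boot by masking the processor version register and comparing the result with pvr_value. A hedged sketch of that match rule, using the mask/value pairs from the diff (the real identify_cpu() also applies the setup/restore callbacks and feature fixups; the example PVR below is hypothetical):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_spec_example {
	uint32_t pvr_mask;
	uint32_t pvr_value;
	const char *cpu_name;
};

/* Subset of the table above; mask/value pairs copied from the diff. */
static const struct cpu_spec_example specs[] = {
	{ 0xffffff00, 0x12C41C80, "APM821XX" },
	{ 0xffff0000, 0x80240000, "e5500" },
	{ 0x00000000, 0x00000000, "(default match)" },
};

static const char *identify(uint32_t pvr)
{
	for (size_t i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
			return specs[i].cpu_name;
	return "(unknown)";
}

int main(void)
{
	printf("%s\n", identify(0x80241011));	/* hypothetical e5500 PVR -> "e5500" */
	return 0;
}
```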
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 4457382f866..832c8c4db25 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -414,18 +414,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs) | |||
414 | crash_kexec_wait_realmode(crashing_cpu); | 414 | crash_kexec_wait_realmode(crashing_cpu); |
415 | #endif | 415 | #endif |
416 | 416 | ||
417 | for_each_irq(i) { | 417 | machine_kexec_mask_interrupts(); |
418 | struct irq_desc *desc = irq_to_desc(i); | ||
419 | |||
420 | if (!desc || !desc->chip || !desc->chip->eoi) | ||
421 | continue; | ||
422 | |||
423 | if (desc->status & IRQ_INPROGRESS) | ||
424 | desc->chip->eoi(i); | ||
425 | |||
426 | if (!(desc->status & IRQ_DISABLED)) | ||
427 | desc->chip->shutdown(i); | ||
428 | } | ||
429 | 418 | ||
430 | /* | 419 | /* |
431 | * Call registered shutdown routines savely. Swap out | 420 | * Call registered shutdown routines savely. Swap out |
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 37771a51811..6e54a0fd31a 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -74,16 +74,17 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask) | |||
74 | { | 74 | { |
75 | struct iommu_table *tbl = get_iommu_table_base(dev); | 75 | struct iommu_table *tbl = get_iommu_table_base(dev); |
76 | 76 | ||
77 | if (!tbl || tbl->it_offset > mask) { | 77 | if (!tbl) { |
78 | printk(KERN_INFO | 78 | dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx" |
79 | "Warning: IOMMU offset too big for device mask\n"); | 79 | ", table unavailable\n", mask); |
80 | if (tbl) | 80 | return 0; |
81 | printk(KERN_INFO | 81 | } |
82 | "mask: 0x%08llx, table offset: 0x%08lx\n", | 82 | |
83 | mask, tbl->it_offset); | 83 | if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { |
84 | else | 84 | dev_info(dev, "Warning: IOMMU window too big for device mask\n"); |
85 | printk(KERN_INFO "mask: 0x%08llx, table unavailable\n", | 85 | dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", |
86 | mask); | 86 | mask, (tbl->it_offset + tbl->it_size) << |
87 | IOMMU_PAGE_SHIFT); | ||
87 | return 0; | 88 | return 0; |
88 | } else | 89 | } else |
89 | return 1; | 90 | return 1; |
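Note: the rewritten dma_iommu_dma_supported() check compares the end of the IOMMU window, expressed in IOMMU pages, against the device's DMA mask shifted down to page units, instead of looking at the window offset alone. A small sketch of the same arithmetic with assumed values (the page shift of 12 and the example windows are assumptions, not taken from the diff):

```c
#include <stdint.h>
#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12	/* assume 4 KiB IOMMU pages */

/* Mirrors the diff's test: the window [it_offset, it_offset + it_size) in
 * pages must end at or below the highest page the mask can address. */
static int window_fits_mask(unsigned long it_offset, unsigned long it_size,
			    uint64_t mask)
{
	return (it_offset + it_size) <= (mask >> IOMMU_PAGE_SHIFT);
}

int main(void)
{
	/* Hypothetical 1 GiB window at offset 0 against a 32-bit mask: fits. */
	printf("%d\n", window_fits_mask(0, 1UL << 18, 0xffffffffULL));
	/* The same window starting above 4 GiB no longer fits. */
	printf("%d\n", window_fits_mask(1UL << 20, 1UL << 18, 0xffffffffULL));
	return 0;
}
```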
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 84d6367ec00..cf02cad62d9 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/memblock.h> | 12 | #include <linux/memblock.h> |
13 | #include <asm/bug.h> | 13 | #include <asm/bug.h> |
14 | #include <asm/abs_addr.h> | 14 | #include <asm/abs_addr.h> |
15 | #include <asm/machdep.h> | ||
15 | 16 | ||
16 | /* | 17 | /* |
17 | * Generic direct DMA implementation | 18 | * Generic direct DMA implementation |
@@ -89,7 +90,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask) | |||
89 | /* Could be improved so platforms can set the limit in case | 90 | /* Could be improved so platforms can set the limit in case |
90 | * they have limited DMA windows | 91 | * they have limited DMA windows |
91 | */ | 92 | */ |
92 | return mask >= (memblock_end_of_DRAM() - 1); | 93 | return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1); |
93 | #else | 94 | #else |
94 | return 1; | 95 | return 1; |
95 | #endif | 96 | #endif |
@@ -154,6 +155,23 @@ EXPORT_SYMBOL(dma_direct_ops); | |||
154 | 155 | ||
155 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | 156 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) |
156 | 157 | ||
158 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
159 | { | ||
160 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
161 | |||
162 | if (ppc_md.dma_set_mask) | ||
163 | return ppc_md.dma_set_mask(dev, dma_mask); | ||
164 | if (unlikely(dma_ops == NULL)) | ||
165 | return -EIO; | ||
166 | if (dma_ops->set_dma_mask != NULL) | ||
167 | return dma_ops->set_dma_mask(dev, dma_mask); | ||
168 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
169 | return -EIO; | ||
170 | *dev->dma_mask = dma_mask; | ||
171 | return 0; | ||
172 | } | ||
173 | EXPORT_SYMBOL(dma_set_mask); | ||
174 | |||
157 | static int __init dma_init(void) | 175 | static int __init dma_init(void) |
158 | { | 176 | { |
159 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 177 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
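Note: the new dma_set_mask() gives the platform the first say, then any per-bus dma_ops hook, and only then falls back to the generic dma_supported() check before storing the mask. A hedged sketch of that precedence chain with stand-in types (none of the struct or field names below are the kernel's; they only mirror the hooks named in the diff):

```c
#include <errno.h>
#include <stdint.h>

struct dev_example {
	uint64_t *dma_mask;
	int (*platform_set_mask)(struct dev_example *, uint64_t); /* ppc_md.dma_set_mask */
	int (*ops_set_mask)(struct dev_example *, uint64_t);      /* dma_ops->set_dma_mask */
	int (*supported)(struct dev_example *, uint64_t);         /* dma_supported() */
};

int dma_set_mask_example(struct dev_example *dev, uint64_t mask)
{
	if (dev->platform_set_mask)
		return dev->platform_set_mask(dev, mask); /* platform override wins */
	if (dev->ops_set_mask)
		return dev->ops_set_mask(dev, mask);      /* then the bus dma_ops hook */
	if (!dev->dma_mask || !dev->supported(dev, mask))
		return -EIO;                              /* generic capability check */
	*dev->dma_mask = mask;
	return 0;
}
```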
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 42e9d908914..d82878c4daa 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -97,6 +97,24 @@ system_call_common: | |||
97 | addi r9,r1,STACK_FRAME_OVERHEAD | 97 | addi r9,r1,STACK_FRAME_OVERHEAD |
98 | ld r11,exception_marker@toc(r2) | 98 | ld r11,exception_marker@toc(r2) |
99 | std r11,-16(r9) /* "regshere" marker */ | 99 | std r11,-16(r9) /* "regshere" marker */ |
100 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) | ||
101 | BEGIN_FW_FTR_SECTION | ||
102 | beq 33f | ||
103 | /* if from user, see if there are any DTL entries to process */ | ||
104 | ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */ | ||
105 | ld r11,PACA_DTL_RIDX(r13) /* get log read index */ | ||
106 | ld r10,LPPACA_DTLIDX(r10) /* get log write index */ | ||
107 | cmpd cr1,r11,r10 | ||
108 | beq+ cr1,33f | ||
109 | bl .accumulate_stolen_time | ||
110 | REST_GPR(0,r1) | ||
111 | REST_4GPRS(3,r1) | ||
112 | REST_2GPRS(7,r1) | ||
113 | addi r9,r1,STACK_FRAME_OVERHEAD | ||
114 | 33: | ||
115 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | ||
116 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ | ||
117 | |||
100 | #ifdef CONFIG_TRACE_IRQFLAGS | 118 | #ifdef CONFIG_TRACE_IRQFLAGS |
101 | bl .trace_hardirqs_on | 119 | bl .trace_hardirqs_on |
102 | REST_GPR(0,r1) | 120 | REST_GPR(0,r1) |
@@ -202,7 +220,9 @@ syscall_exit: | |||
202 | bge- syscall_error | 220 | bge- syscall_error |
203 | syscall_error_cont: | 221 | syscall_error_cont: |
204 | ld r7,_NIP(r1) | 222 | ld r7,_NIP(r1) |
223 | BEGIN_FTR_SECTION | ||
205 | stdcx. r0,0,r1 /* to clear the reservation */ | 224 | stdcx. r0,0,r1 /* to clear the reservation */ |
225 | END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
206 | andi. r6,r8,MSR_PR | 226 | andi. r6,r8,MSR_PR |
207 | ld r4,_LINK(r1) | 227 | ld r4,_LINK(r1) |
208 | /* | 228 | /* |
@@ -419,6 +439,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
419 | sync | 439 | sync |
420 | #endif /* CONFIG_SMP */ | 440 | #endif /* CONFIG_SMP */ |
421 | 441 | ||
442 | /* | ||
443 | * If we optimise away the clear of the reservation in system | ||
444 | * calls because we know the CPU tracks the address of the | ||
445 | * reservation, then we need to clear it here to cover the | ||
446 | * case that the kernel context switch path has no larx | ||
447 | * instructions. | ||
448 | */ | ||
449 | BEGIN_FTR_SECTION | ||
450 | ldarx r6,0,r1 | ||
451 | END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
452 | |||
422 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ | 453 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ |
423 | std r6,PACACURRENT(r13) /* Set new 'current' */ | 454 | std r6,PACACURRENT(r13) /* Set new 'current' */ |
424 | 455 | ||
@@ -576,7 +607,16 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | |||
576 | andi. r0,r3,MSR_RI | 607 | andi. r0,r3,MSR_RI |
577 | beq- unrecov_restore | 608 | beq- unrecov_restore |
578 | 609 | ||
610 | /* | ||
611 | * Clear the reservation. If we know the CPU tracks the address of | ||
612 | * the reservation then we can potentially save some cycles and use | ||
613 | * a larx. On POWER6 and POWER7 this is significantly faster. | ||
614 | */ | ||
615 | BEGIN_FTR_SECTION | ||
579 | stdcx. r0,0,r1 /* to clear the reservation */ | 616 | stdcx. r0,0,r1 /* to clear the reservation */ |
617 | FTR_SECTION_ELSE | ||
618 | ldarx r4,0,r1 | ||
619 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
580 | 620 | ||
581 | /* | 621 | /* |
582 | * Clear RI before restoring r13. If we are returning to | 622 | * Clear RI before restoring r13. If we are returning to |
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index fc8f5b14019..e86c040ae58 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -163,24 +163,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |||
163 | /* | 163 | /* |
164 | * These are used in the alignment trap handler when emulating | 164 | * These are used in the alignment trap handler when emulating |
165 | * single-precision loads and stores. | 165 | * single-precision loads and stores. |
166 | * We restore and save the fpscr so the task gets the same result | ||
167 | * and exceptions as if the cpu had performed the load or store. | ||
168 | */ | 166 | */ |
169 | 167 | ||
170 | _GLOBAL(cvt_fd) | 168 | _GLOBAL(cvt_fd) |
171 | lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */ | ||
172 | MTFSF_L(0) | ||
173 | lfs 0,0(r3) | 169 | lfs 0,0(r3) |
174 | stfd 0,0(r4) | 170 | stfd 0,0(r4) |
175 | mffs 0 | ||
176 | stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */ | ||
177 | blr | 171 | blr |
178 | 172 | ||
179 | _GLOBAL(cvt_df) | 173 | _GLOBAL(cvt_df) |
180 | lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */ | ||
181 | MTFSF_L(0) | ||
182 | lfd 0,0(r3) | 174 | lfd 0,0(r3) |
183 | stfs 0,0(r4) | 175 | stfs 0,0(r4) |
184 | mffs 0 | ||
185 | stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */ | ||
186 | blr | 176 | blr |
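Note: with the FPSCR save/restore removed, cvt_fd()/cvt_df() are plain single/double conversions, which is why the align.c hunks above drop the &current->thread argument. Roughly, the trimmed assembly now corresponds to nothing more than the following illustrative C (ignoring the rounding-mode and exception behaviour the FPU still applies):

```c
/* lfs + stfd: widen a single-precision value to double precision. */
void cvt_fd_example(const float *in, double *out)
{
	*out = *in;
}

/* lfd + stfs: narrow a double-precision value to single precision. */
void cvt_df_example(const double *in, float *out)
{
	*out = (float)*in;
}
```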
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 4faeba24785..529b817f473 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -152,8 +152,11 @@ _ENTRY(__early_start) | |||
152 | /* Check to see if we're the second processor, and jump | 152 | /* Check to see if we're the second processor, and jump |
153 | * to the secondary_start code if so | 153 | * to the secondary_start code if so |
154 | */ | 154 | */ |
155 | mfspr r24,SPRN_PIR | 155 | lis r24, boot_cpuid@h |
156 | cmpwi r24,0 | 156 | ori r24, r24, boot_cpuid@l |
157 | lwz r24, 0(r24) | ||
158 | cmpwi r24, -1 | ||
159 | mfspr r24,SPRN_PIR | ||
157 | bne __secondary_start | 160 | bne __secondary_start |
158 | #endif | 161 | #endif |
159 | 162 | ||
@@ -175,6 +178,9 @@ _ENTRY(__early_start) | |||
175 | li r0,0 | 178 | li r0,0 |
176 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | 179 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) |
177 | 180 | ||
181 | rlwinm r22,r1,0,0,31-THREAD_SHIFT /* current thread_info */ | ||
182 | stw r24, TI_CPU(r22) | ||
183 | |||
178 | bl early_init | 184 | bl early_init |
179 | 185 | ||
180 | #ifdef CONFIG_RELOCATABLE | 186 | #ifdef CONFIG_RELOCATABLE |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1903290f546..ce557f6f00f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -587,8 +587,10 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
587 | * this will be fixed once slab is made available early | 587 | * this will be fixed once slab is made available early |
588 | * instead of the current cruft | 588 | * instead of the current cruft |
589 | */ | 589 | */ |
590 | if (mem_init_done) | 590 | if (mem_init_done) { |
591 | of_node_put(host->of_node); | ||
591 | kfree(host); | 592 | kfree(host); |
593 | } | ||
592 | return NULL; | 594 | return NULL; |
593 | } | 595 | } |
594 | irq_map[0].host = host; | 596 | irq_map[0].host = host; |
@@ -1143,7 +1145,7 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
1143 | unsigned long flags; | 1145 | unsigned long flags; |
1144 | struct irq_desc *desc; | 1146 | struct irq_desc *desc; |
1145 | const char *p; | 1147 | const char *p; |
1146 | char none[] = "none"; | 1148 | static const char none[] = "none"; |
1147 | int i; | 1149 | int i; |
1148 | 1150 | ||
1149 | seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", | 1151 | seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 50362b6ef6e..8d9e3b9cda6 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -56,7 +56,7 @@ static unsigned long get_purr(void) | |||
56 | 56 | ||
57 | for_each_possible_cpu(cpu) { | 57 | for_each_possible_cpu(cpu) { |
58 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | 58 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
59 | sum_purr += lppaca[cpu].emulated_time_base; | 59 | sum_purr += lppaca_of(cpu).emulated_time_base; |
60 | else { | 60 | else { |
61 | struct cpu_usage *cu; | 61 | struct cpu_usage *cu; |
62 | 62 | ||
@@ -263,7 +263,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
263 | ppp_data.active_system_procs); | 263 | ppp_data.active_system_procs); |
264 | 264 | ||
265 | /* pool related entries are apropriate for shared configs */ | 265 | /* pool related entries are apropriate for shared configs */ |
266 | if (lppaca[0].shared_proc) { | 266 | if (lppaca_of(0).shared_proc) { |
267 | unsigned long pool_idle_time, pool_procs; | 267 | unsigned long pool_idle_time, pool_procs; |
268 | 268 | ||
269 | seq_printf(m, "pool=%d\n", ppp_data.pool_num); | 269 | seq_printf(m, "pool=%d\n", ppp_data.pool_num); |
@@ -460,8 +460,8 @@ static void pseries_cmo_data(struct seq_file *m) | |||
460 | return; | 460 | return; |
461 | 461 | ||
462 | for_each_possible_cpu(cpu) { | 462 | for_each_possible_cpu(cpu) { |
463 | cmo_faults += lppaca[cpu].cmo_faults; | 463 | cmo_faults += lppaca_of(cpu).cmo_faults; |
464 | cmo_fault_time += lppaca[cpu].cmo_fault_time; | 464 | cmo_fault_time += lppaca_of(cpu).cmo_fault_time; |
465 | } | 465 | } |
466 | 466 | ||
467 | seq_printf(m, "cmo_faults=%lu\n", cmo_faults); | 467 | seq_printf(m, "cmo_faults=%lu\n", cmo_faults); |
@@ -479,8 +479,8 @@ static void splpar_dispatch_data(struct seq_file *m) | |||
479 | unsigned long dispatch_dispersions = 0; | 479 | unsigned long dispatch_dispersions = 0; |
480 | 480 | ||
481 | for_each_possible_cpu(cpu) { | 481 | for_each_possible_cpu(cpu) { |
482 | dispatches += lppaca[cpu].yield_count; | 482 | dispatches += lppaca_of(cpu).yield_count; |
483 | dispatch_dispersions += lppaca[cpu].dispersion_count; | 483 | dispatch_dispersions += lppaca_of(cpu).dispersion_count; |
484 | } | 484 | } |
485 | 485 | ||
486 | seq_printf(m, "dispatches=%lu\n", dispatches); | 486 | seq_printf(m, "dispatches=%lu\n", dispatches); |
@@ -545,7 +545,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
545 | seq_printf(m, "partition_potential_processors=%d\n", | 545 | seq_printf(m, "partition_potential_processors=%d\n", |
546 | partition_potential_processors); | 546 | partition_potential_processors); |
547 | 547 | ||
548 | seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc); | 548 | seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc); |
549 | 549 | ||
550 | seq_printf(m, "slb_size=%d\n", mmu_slb_size); | 550 | seq_printf(m, "slb_size=%d\n", mmu_slb_size); |
551 | 551 | ||
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index dd6c141f166..df7e20c191c 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -14,10 +14,34 @@ | |||
14 | #include <linux/threads.h> | 14 | #include <linux/threads.h> |
15 | #include <linux/memblock.h> | 15 | #include <linux/memblock.h> |
16 | #include <linux/of.h> | 16 | #include <linux/of.h> |
17 | #include <linux/irq.h> | ||
18 | |||
17 | #include <asm/machdep.h> | 19 | #include <asm/machdep.h> |
18 | #include <asm/prom.h> | 20 | #include <asm/prom.h> |
19 | #include <asm/sections.h> | 21 | #include <asm/sections.h> |
20 | 22 | ||
23 | void machine_kexec_mask_interrupts(void) { | ||
24 | unsigned int i; | ||
25 | |||
26 | for_each_irq(i) { | ||
27 | struct irq_desc *desc = irq_to_desc(i); | ||
28 | |||
29 | if (!desc || !desc->chip) | ||
30 | continue; | ||
31 | |||
32 | if (desc->chip->eoi && | ||
33 | desc->status & IRQ_INPROGRESS) | ||
34 | desc->chip->eoi(i); | ||
35 | |||
36 | if (desc->chip->mask) | ||
37 | desc->chip->mask(i); | ||
38 | |||
39 | if (desc->chip->disable && | ||
40 | !(desc->status & IRQ_DISABLED)) | ||
41 | desc->chip->disable(i); | ||
42 | } | ||
43 | } | ||
44 | |||
21 | void machine_crash_shutdown(struct pt_regs *regs) | 45 | void machine_crash_shutdown(struct pt_regs *regs) |
22 | { | 46 | { |
23 | if (ppc_md.machine_crash_shutdown) | 47 | if (ppc_md.machine_crash_shutdown) |
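Note: machine_kexec_mask_interrupts(), now shared by the crash path above and the 32-bit kexec path below, walks every interrupt descriptor and, where the controller provides the hooks, acknowledges anything in flight before masking and disabling the line. A simplified sketch of that pattern (the descriptor and chip layouts here are stand-ins, not the kernel's irq_desc/chip):

```c
enum { IRQ_INPROGRESS = 0x1, IRQ_DISABLED = 0x2 };

struct chip_example {
	void (*eoi)(unsigned int irq);
	void (*mask)(unsigned int irq);
	void (*disable)(unsigned int irq);
};

struct desc_example {
	unsigned int status;
	struct chip_example *chip;
};

void mask_all_interrupts(struct desc_example **descs, unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++) {
		struct desc_example *desc = descs[i];

		if (!desc || !desc->chip)
			continue;
		if (desc->chip->eoi && (desc->status & IRQ_INPROGRESS))
			desc->chip->eoi(i);	/* finish whatever is in flight */
		if (desc->chip->mask)
			desc->chip->mask(i);	/* stop new deliveries */
		if (desc->chip->disable && !(desc->status & IRQ_DISABLED))
			desc->chip->disable(i);	/* and shut the line down */
	}
}
```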
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
index ae63a964b85..e63f2e7d2ef 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -39,6 +39,10 @@ void default_machine_kexec(struct kimage *image) | |||
39 | /* Interrupts aren't acceptable while we reboot */ | 39 | /* Interrupts aren't acceptable while we reboot */ |
40 | local_irq_disable(); | 40 | local_irq_disable(); |
41 | 41 | ||
42 | /* mask each interrupt so we are in a more sane state for the | ||
43 | * kexec kernel */ | ||
44 | machine_kexec_mask_interrupts(); | ||
45 | |||
42 | page_list = image->head; | 46 | page_list = image->head; |
43 | 47 | ||
44 | /* we need both effective and real address here */ | 48 | /* we need both effective and real address here */ |
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index a4e72159234..ebf9846f3c3 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -27,6 +27,20 @@ extern unsigned long __toc_start; | |||
27 | #ifdef CONFIG_PPC_BOOK3S | 27 | #ifdef CONFIG_PPC_BOOK3S |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * We only have to have statically allocated lppaca structs on | ||
31 | * legacy iSeries, which supports at most 64 cpus. | ||
32 | */ | ||
33 | #ifdef CONFIG_PPC_ISERIES | ||
34 | #if NR_CPUS < 64 | ||
35 | #define NR_LPPACAS NR_CPUS | ||
36 | #else | ||
37 | #define NR_LPPACAS 64 | ||
38 | #endif | ||
39 | #else /* not iSeries */ | ||
40 | #define NR_LPPACAS 1 | ||
41 | #endif | ||
42 | |||
43 | /* | ||
30 | * The structure which the hypervisor knows about - this structure | 44 | * The structure which the hypervisor knows about - this structure |
31 | * should not cross a page boundary. The vpa_init/register_vpa call | 45 | * should not cross a page boundary. The vpa_init/register_vpa call |
32 | * is now known to fail if the lppaca structure crosses a page | 46 | * is now known to fail if the lppaca structure crosses a page |
@@ -36,7 +50,7 @@ extern unsigned long __toc_start; | |||
36 | * will suffice to ensure that it doesn't cross a page boundary. | 50 | * will suffice to ensure that it doesn't cross a page boundary. |
37 | */ | 51 | */ |
38 | struct lppaca lppaca[] = { | 52 | struct lppaca lppaca[] = { |
39 | [0 ... (NR_CPUS-1)] = { | 53 | [0 ... (NR_LPPACAS-1)] = { |
40 | .desc = 0xd397d781, /* "LpPa" */ | 54 | .desc = 0xd397d781, /* "LpPa" */ |
41 | .size = sizeof(struct lppaca), | 55 | .size = sizeof(struct lppaca), |
42 | .dyn_proc_status = 2, | 56 | .dyn_proc_status = 2, |
@@ -49,6 +63,54 @@ struct lppaca lppaca[] = { | |||
49 | }, | 63 | }, |
50 | }; | 64 | }; |
51 | 65 | ||
66 | static struct lppaca *extra_lppacas; | ||
67 | static long __initdata lppaca_size; | ||
68 | |||
69 | static void allocate_lppacas(int nr_cpus, unsigned long limit) | ||
70 | { | ||
71 | if (nr_cpus <= NR_LPPACAS) | ||
72 | return; | ||
73 | |||
74 | lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) * | ||
75 | (nr_cpus - NR_LPPACAS)); | ||
76 | extra_lppacas = __va(memblock_alloc_base(lppaca_size, | ||
77 | PAGE_SIZE, limit)); | ||
78 | } | ||
79 | |||
80 | static struct lppaca *new_lppaca(int cpu) | ||
81 | { | ||
82 | struct lppaca *lp; | ||
83 | |||
84 | if (cpu < NR_LPPACAS) | ||
85 | return &lppaca[cpu]; | ||
86 | |||
87 | lp = extra_lppacas + (cpu - NR_LPPACAS); | ||
88 | *lp = lppaca[0]; | ||
89 | |||
90 | return lp; | ||
91 | } | ||
92 | |||
93 | static void free_lppacas(void) | ||
94 | { | ||
95 | long new_size = 0, nr; | ||
96 | |||
97 | if (!lppaca_size) | ||
98 | return; | ||
99 | nr = num_possible_cpus() - NR_LPPACAS; | ||
100 | if (nr > 0) | ||
101 | new_size = PAGE_ALIGN(nr * sizeof(struct lppaca)); | ||
102 | if (new_size >= lppaca_size) | ||
103 | return; | ||
104 | |||
105 | memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size); | ||
106 | lppaca_size = new_size; | ||
107 | } | ||
108 | |||
109 | #else | ||
110 | |||
111 | static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { } | ||
112 | static inline void free_lppacas(void) { } | ||
113 | |||
52 | #endif /* CONFIG_PPC_BOOK3S */ | 114 | #endif /* CONFIG_PPC_BOOK3S */ |
53 | 115 | ||
54 | #ifdef CONFIG_PPC_STD_MMU_64 | 116 | #ifdef CONFIG_PPC_STD_MMU_64 |
@@ -88,7 +150,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) | |||
88 | unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; | 150 | unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; |
89 | 151 | ||
90 | #ifdef CONFIG_PPC_BOOK3S | 152 | #ifdef CONFIG_PPC_BOOK3S |
91 | new_paca->lppaca_ptr = &lppaca[cpu]; | 153 | new_paca->lppaca_ptr = new_lppaca(cpu); |
92 | #else | 154 | #else |
93 | new_paca->kernel_pgd = swapper_pg_dir; | 155 | new_paca->kernel_pgd = swapper_pg_dir; |
94 | #endif | 156 | #endif |
@@ -144,6 +206,8 @@ void __init allocate_pacas(void) | |||
144 | printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", | 206 | printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", |
145 | paca_size, nr_cpus, paca); | 207 | paca_size, nr_cpus, paca); |
146 | 208 | ||
209 | allocate_lppacas(nr_cpus, limit); | ||
210 | |||
147 | /* Can't use for_each_*_cpu, as they aren't functional yet */ | 211 | /* Can't use for_each_*_cpu, as they aren't functional yet */ |
148 | for (cpu = 0; cpu < nr_cpus; cpu++) | 212 | for (cpu = 0; cpu < nr_cpus; cpu++) |
149 | initialise_paca(&paca[cpu], cpu); | 213 | initialise_paca(&paca[cpu], cpu); |
@@ -164,4 +228,6 @@ void __init free_unused_pacas(void) | |||
164 | paca_size - new_size); | 228 | paca_size - new_size); |
165 | 229 | ||
166 | paca_size = new_size; | 230 | paca_size = new_size; |
231 | |||
232 | free_lppacas(); | ||
167 | } | 233 | } |
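Note: the paca.c change keeps only a small statically allocated lppaca array and serves any additional CPUs from one dynamically allocated block, with each extra entry initialised as a copy of entry 0. A user-space flavoured sketch of that two-tier scheme (malloc stands in for memblock_alloc_base; the struct is truncated and error handling is omitted):

```c
#include <stdlib.h>

#define NR_STATIC 1	/* NR_LPPACAS on non-iSeries, per the diff */

struct lppaca_example {
	unsigned int desc;
	unsigned short size;
	/* ... remaining fields elided ... */
};

static struct lppaca_example static_lppacas[NR_STATIC] = {
	[0] = { .desc = 0xd397d781, .size = sizeof(struct lppaca_example) },
};
static struct lppaca_example *extra_lppacas;

void allocate_extra(int nr_cpus)
{
	if (nr_cpus > NR_STATIC)
		extra_lppacas = malloc(sizeof(*extra_lppacas) * (nr_cpus - NR_STATIC));
}

struct lppaca_example *lppaca_for(int cpu)
{
	if (cpu < NR_STATIC)
		return &static_lppacas[cpu];
	/* Extra entries start life as a copy of the statically initialised entry 0. */
	extra_lppacas[cpu - NR_STATIC] = static_lppacas[0];
	return &extra_lppacas[cpu - NR_STATIC];
}
```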
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 9021c4ad4bb..10a44e68ef1 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1090,8 +1090,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | |||
1090 | bus->number, bus->self ? pci_name(bus->self) : "PHB"); | 1090 | bus->number, bus->self ? pci_name(bus->self) : "PHB"); |
1091 | 1091 | ||
1092 | list_for_each_entry(dev, &bus->devices, bus_list) { | 1092 | list_for_each_entry(dev, &bus->devices, bus_list) { |
1093 | struct dev_archdata *sd = &dev->dev.archdata; | ||
1094 | |||
1095 | /* Cardbus can call us to add new devices to a bus, so ignore | 1093 | /* Cardbus can call us to add new devices to a bus, so ignore |
1096 | * those who are already fully discovered | 1094 | * those who are already fully discovered |
1097 | */ | 1095 | */ |
@@ -1107,7 +1105,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | |||
1107 | set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); | 1105 | set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); |
1108 | 1106 | ||
1109 | /* Hook up default DMA ops */ | 1107 | /* Hook up default DMA ops */ |
1110 | sd->dma_ops = pci_dma_ops; | 1108 | set_dma_ops(&dev->dev, pci_dma_ops); |
1111 | set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); | 1109 | set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); |
1112 | 1110 | ||
1113 | /* Additional platform DMA/iommu setup */ | 1111 | /* Additional platform DMA/iommu setup */ |
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 8eff48e20db..3fee685de4d 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -169,9 +169,11 @@ static int p970_marked_instr_event(u64 event) | |||
169 | switch (unit) { | 169 | switch (unit) { |
170 | case PM_VPU: | 170 | case PM_VPU: |
171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ | 171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ |
172 | break; | ||
172 | case PM_LSU0: | 173 | case PM_LSU0: |
173 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ | 174 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ |
174 | mask = 0x085dff00; | 175 | mask = 0x085dff00; |
176 | break; | ||
175 | case PM_LSU1L: | 177 | case PM_LSU1L: |
176 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ | 178 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ |
177 | break; | 179 | break; |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b1c648a36b0..84906d3fc86 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -517,7 +517,6 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
517 | 517 | ||
518 | account_system_vtime(current); | 518 | account_system_vtime(current); |
519 | account_process_vtime(current); | 519 | account_process_vtime(current); |
520 | calculate_steal_time(); | ||
521 | 520 | ||
522 | /* | 521 | /* |
523 | * We can't take a PMU exception inside _switch() since there is a | 522 | * We can't take a PMU exception inside _switch() since there is a |
@@ -1298,14 +1297,3 @@ unsigned long randomize_et_dyn(unsigned long base) | |||
1298 | 1297 | ||
1299 | return ret; | 1298 | return ret; |
1300 | } | 1299 | } |
1301 | |||
1302 | #ifdef CONFIG_SMP | ||
1303 | int arch_sd_sibling_asym_packing(void) | ||
1304 | { | ||
1305 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
1306 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
1307 | return SD_ASYM_PACKING; | ||
1308 | } | ||
1309 | return 0; | ||
1310 | } | ||
1311 | #endif | ||
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 11f3cd9c832..286d9783d93 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1681,7 +1681,7 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
1681 | 1681 | ||
1682 | if (unlikely(current->audit_context)) { | 1682 | if (unlikely(current->audit_context)) { |
1683 | #ifdef CONFIG_PPC64 | 1683 | #ifdef CONFIG_PPC64 |
1684 | if (!test_thread_flag(TIF_32BIT)) | 1684 | if (!is_32bit_task()) |
1685 | audit_syscall_entry(AUDIT_ARCH_PPC64, | 1685 | audit_syscall_entry(AUDIT_ARCH_PPC64, |
1686 | regs->gpr[0], | 1686 | regs->gpr[0], |
1687 | regs->gpr[3], regs->gpr[4], | 1687 | regs->gpr[3], regs->gpr[4], |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 7333fdbf857..8fe8bc61c10 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -805,7 +805,7 @@ static void rtas_percpu_suspend_me(void *info) | |||
805 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); | 805 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); |
806 | } | 806 | } |
807 | 807 | ||
808 | static int rtas_ibm_suspend_me(struct rtas_args *args) | 808 | int rtas_ibm_suspend_me(struct rtas_args *args) |
809 | { | 809 | { |
810 | long state; | 810 | long state; |
811 | long rc; | 811 | long rc; |
@@ -855,7 +855,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) | |||
855 | return atomic_read(&data.error); | 855 | return atomic_read(&data.error); |
856 | } | 856 | } |
857 | #else /* CONFIG_PPC_PSERIES */ | 857 | #else /* CONFIG_PPC_PSERIES */ |
858 | static int rtas_ibm_suspend_me(struct rtas_args *args) | 858 | int rtas_ibm_suspend_me(struct rtas_args *args) |
859 | { | 859 | { |
860 | return -ENOSYS; | 860 | return -ENOSYS; |
861 | } | 861 | } |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index b86111fe925..1d2fbc90530 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -46,7 +46,7 @@ | |||
46 | 46 | ||
47 | extern void bootx_init(unsigned long r4, unsigned long phys); | 47 | extern void bootx_init(unsigned long r4, unsigned long phys); |
48 | 48 | ||
49 | int boot_cpuid; | 49 | int boot_cpuid = -1; |
50 | EXPORT_SYMBOL_GPL(boot_cpuid); | 50 | EXPORT_SYMBOL_GPL(boot_cpuid); |
51 | int boot_cpuid_phys; | 51 | int boot_cpuid_phys; |
52 | 52 | ||
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0008bc58e82..68034bbf2e4 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -508,9 +508,6 @@ int __devinit start_secondary(void *unused) | |||
508 | if (smp_ops->take_timebase) | 508 | if (smp_ops->take_timebase) |
509 | smp_ops->take_timebase(); | 509 | smp_ops->take_timebase(); |
510 | 510 | ||
511 | if (system_state > SYSTEM_BOOTING) | ||
512 | snapshot_timebase(); | ||
513 | |||
514 | secondary_cpu_time_init(); | 511 | secondary_cpu_time_init(); |
515 | 512 | ||
516 | ipi_call_lock(); | 513 | ipi_call_lock(); |
@@ -575,11 +572,18 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
575 | 572 | ||
576 | free_cpumask_var(old_mask); | 573 | free_cpumask_var(old_mask); |
577 | 574 | ||
578 | snapshot_timebases(); | ||
579 | |||
580 | dump_numa_cpu_topology(); | 575 | dump_numa_cpu_topology(); |
581 | } | 576 | } |
582 | 577 | ||
578 | int arch_sd_sibling_asym_packing(void) | ||
579 | { | ||
580 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
581 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
582 | return SD_ASYM_PACKING; | ||
583 | } | ||
584 | return 0; | ||
585 | } | ||
586 | |||
583 | #ifdef CONFIG_HOTPLUG_CPU | 587 | #ifdef CONFIG_HOTPLUG_CPU |
584 | int __cpu_disable(void) | 588 | int __cpu_disable(void) |
585 | { | 589 | { |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 54888eb10c3..010406958d9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -161,10 +161,9 @@ extern struct timezone sys_tz; | |||
161 | static long timezone_offset; | 161 | static long timezone_offset; |
162 | 162 | ||
163 | unsigned long ppc_proc_freq; | 163 | unsigned long ppc_proc_freq; |
164 | EXPORT_SYMBOL(ppc_proc_freq); | 164 | EXPORT_SYMBOL_GPL(ppc_proc_freq); |
165 | unsigned long ppc_tb_freq; | 165 | unsigned long ppc_tb_freq; |
166 | 166 | EXPORT_SYMBOL_GPL(ppc_tb_freq); | |
167 | static DEFINE_PER_CPU(u64, last_jiffy); | ||
168 | 167 | ||
169 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 168 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
170 | /* | 169 | /* |
@@ -185,6 +184,8 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); | |||
185 | 184 | ||
186 | cputime_t cputime_one_jiffy; | 185 | cputime_t cputime_one_jiffy; |
187 | 186 | ||
187 | void (*dtl_consumer)(struct dtl_entry *, u64); | ||
188 | |||
188 | static void calc_cputime_factors(void) | 189 | static void calc_cputime_factors(void) |
189 | { | 190 | { |
190 | struct div_result res; | 191 | struct div_result res; |
@@ -200,62 +201,153 @@ static void calc_cputime_factors(void) | |||
200 | } | 201 | } |
201 | 202 | ||
202 | /* | 203 | /* |
203 | * Read the PURR on systems that have it, otherwise the timebase. | 204 | * Read the SPURR on systems that have it, otherwise the PURR, |
205 | * or if that doesn't exist return the timebase value passed in. | ||
204 | */ | 206 | */ |
205 | static u64 read_purr(void) | 207 | static u64 read_spurr(u64 tb) |
206 | { | 208 | { |
209 | if (cpu_has_feature(CPU_FTR_SPURR)) | ||
210 | return mfspr(SPRN_SPURR); | ||
207 | if (cpu_has_feature(CPU_FTR_PURR)) | 211 | if (cpu_has_feature(CPU_FTR_PURR)) |
208 | return mfspr(SPRN_PURR); | 212 | return mfspr(SPRN_PURR); |
209 | return mftb(); | 213 | return tb; |
210 | } | 214 | } |
211 | 215 | ||
216 | #ifdef CONFIG_PPC_SPLPAR | ||
217 | |||
212 | /* | 218 | /* |
213 | * Read the SPURR on systems that have it, otherwise the purr | 219 | * Scan the dispatch trace log and count up the stolen time. |
220 | * Should be called with interrupts disabled. | ||
214 | */ | 221 | */ |
215 | static u64 read_spurr(u64 purr) | 222 | static u64 scan_dispatch_log(u64 stop_tb) |
216 | { | 223 | { |
217 | /* | 224 | u64 i = local_paca->dtl_ridx; |
218 | * cpus without PURR won't have a SPURR | 225 | struct dtl_entry *dtl = local_paca->dtl_curr; |
219 | * We already know the former when we use this, so tell gcc | 226 | struct dtl_entry *dtl_end = local_paca->dispatch_log_end; |
220 | */ | 227 | struct lppaca *vpa = local_paca->lppaca_ptr; |
221 | if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR)) | 228 | u64 tb_delta; |
222 | return mfspr(SPRN_SPURR); | 229 | u64 stolen = 0; |
223 | return purr; | 230 | u64 dtb; |
231 | |||
232 | if (i == vpa->dtl_idx) | ||
233 | return 0; | ||
234 | while (i < vpa->dtl_idx) { | ||
235 | if (dtl_consumer) | ||
236 | dtl_consumer(dtl, i); | ||
237 | dtb = dtl->timebase; | ||
238 | tb_delta = dtl->enqueue_to_dispatch_time + | ||
239 | dtl->ready_to_enqueue_time; | ||
240 | barrier(); | ||
241 | if (i + N_DISPATCH_LOG < vpa->dtl_idx) { | ||
242 | /* buffer has overflowed */ | ||
243 | i = vpa->dtl_idx - N_DISPATCH_LOG; | ||
244 | dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | ||
245 | continue; | ||
246 | } | ||
247 | if (dtb > stop_tb) | ||
248 | break; | ||
249 | stolen += tb_delta; | ||
250 | ++i; | ||
251 | ++dtl; | ||
252 | if (dtl == dtl_end) | ||
253 | dtl = local_paca->dispatch_log; | ||
254 | } | ||
255 | local_paca->dtl_ridx = i; | ||
256 | local_paca->dtl_curr = dtl; | ||
257 | return stolen; | ||
224 | } | 258 | } |
225 | 259 | ||
226 | /* | 260 | /* |
261 | * Accumulate stolen time by scanning the dispatch trace log. | ||
262 | * Called on entry from user mode. | ||
263 | */ | ||
264 | void accumulate_stolen_time(void) | ||
265 | { | ||
266 | u64 sst, ust; | ||
267 | |||
268 | sst = scan_dispatch_log(get_paca()->starttime_user); | ||
269 | ust = scan_dispatch_log(get_paca()->starttime); | ||
270 | get_paca()->system_time -= sst; | ||
271 | get_paca()->user_time -= ust; | ||
272 | get_paca()->stolen_time += ust + sst; | ||
273 | } | ||
274 | |||
275 | static inline u64 calculate_stolen_time(u64 stop_tb) | ||
276 | { | ||
277 | u64 stolen = 0; | ||
278 | |||
279 | if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) { | ||
280 | stolen = scan_dispatch_log(stop_tb); | ||
281 | get_paca()->system_time -= stolen; | ||
282 | } | ||
283 | |||
284 | stolen += get_paca()->stolen_time; | ||
285 | get_paca()->stolen_time = 0; | ||
286 | return stolen; | ||
287 | } | ||
288 | |||
289 | #else /* CONFIG_PPC_SPLPAR */ | ||
290 | static inline u64 calculate_stolen_time(u64 stop_tb) | ||
291 | { | ||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | #endif /* CONFIG_PPC_SPLPAR */ | ||
296 | |||
297 | /* | ||
227 | * Account time for a transition between system, hard irq | 298 | * Account time for a transition between system, hard irq |
228 | * or soft irq state. | 299 | * or soft irq state. |
229 | */ | 300 | */ |
230 | void account_system_vtime(struct task_struct *tsk) | 301 | void account_system_vtime(struct task_struct *tsk) |
231 | { | 302 | { |
232 | u64 now, nowscaled, delta, deltascaled, sys_time; | 303 | u64 now, nowscaled, delta, deltascaled; |
233 | unsigned long flags; | 304 | unsigned long flags; |
305 | u64 stolen, udelta, sys_scaled, user_scaled; | ||
234 | 306 | ||
235 | local_irq_save(flags); | 307 | local_irq_save(flags); |
236 | now = read_purr(); | 308 | now = mftb(); |
237 | nowscaled = read_spurr(now); | 309 | nowscaled = read_spurr(now); |
238 | delta = now - get_paca()->startpurr; | 310 | get_paca()->system_time += now - get_paca()->starttime; |
311 | get_paca()->starttime = now; | ||
239 | deltascaled = nowscaled - get_paca()->startspurr; | 312 | deltascaled = nowscaled - get_paca()->startspurr; |
240 | get_paca()->startpurr = now; | ||
241 | get_paca()->startspurr = nowscaled; | 313 | get_paca()->startspurr = nowscaled; |
242 | if (!in_interrupt()) { | 314 | |
243 | /* deltascaled includes both user and system time. | 315 | stolen = calculate_stolen_time(now); |
244 | * Hence scale it based on the purr ratio to estimate | 316 | |
245 | * the system time */ | 317 | delta = get_paca()->system_time; |
246 | sys_time = get_paca()->system_time; | 318 | get_paca()->system_time = 0; |
247 | if (get_paca()->user_time) | 319 | udelta = get_paca()->user_time - get_paca()->utime_sspurr; |
248 | deltascaled = deltascaled * sys_time / | 320 | get_paca()->utime_sspurr = get_paca()->user_time; |
249 | (sys_time + get_paca()->user_time); | 321 | |
250 | delta += sys_time; | 322 | /* |
251 | get_paca()->system_time = 0; | 323 | * Because we don't read the SPURR on every kernel entry/exit, |
324 | * deltascaled includes both user and system SPURR ticks. | ||
325 | * Apportion these ticks to system SPURR ticks and user | ||
326 | * SPURR ticks in the same ratio as the system time (delta) | ||
327 | * and user time (udelta) values obtained from the timebase | ||
328 | * over the same interval. The system ticks get accounted here; | ||
329 | * the user ticks get saved up in paca->user_time_scaled to be | ||
330 | * used by account_process_tick. | ||
331 | */ | ||
332 | sys_scaled = delta; | ||
333 | user_scaled = udelta; | ||
334 | if (deltascaled != delta + udelta) { | ||
335 | if (udelta) { | ||
336 | sys_scaled = deltascaled * delta / (delta + udelta); | ||
337 | user_scaled = deltascaled - sys_scaled; | ||
338 | } else { | ||
339 | sys_scaled = deltascaled; | ||
340 | } | ||
341 | } | ||
342 | get_paca()->user_time_scaled += user_scaled; | ||
343 | |||
344 | if (in_irq() || idle_task(smp_processor_id()) != tsk) { | ||
345 | account_system_time(tsk, 0, delta, sys_scaled); | ||
346 | if (stolen) | ||
347 | account_steal_time(stolen); | ||
348 | } else { | ||
349 | account_idle_time(delta + stolen); | ||
252 | } | 350 | } |
253 | if (in_irq() || idle_task(smp_processor_id()) != tsk) | ||
254 | account_system_time(tsk, 0, delta, deltascaled); | ||
255 | else | ||
256 | account_idle_time(delta); | ||
257 | __get_cpu_var(cputime_last_delta) = delta; | ||
258 | __get_cpu_var(cputime_scaled_last_delta) = deltascaled; | ||
259 | local_irq_restore(flags); | 351 | local_irq_restore(flags); |
260 | } | 352 | } |
261 | EXPORT_SYMBOL_GPL(account_system_vtime); | 353 | EXPORT_SYMBOL_GPL(account_system_vtime); |
@@ -265,125 +357,26 @@ EXPORT_SYMBOL_GPL(account_system_vtime); | |||
265 | * by the exception entry and exit code to the generic process | 357 | * by the exception entry and exit code to the generic process |
266 | * user and system time records. | 358 | * user and system time records. |
267 | * Must be called with interrupts disabled. | 359 | * Must be called with interrupts disabled. |
360 | * Assumes that account_system_vtime() has been called recently | ||
361 | * (i.e. since the last entry from usermode) so that | ||
362 | * get_paca()->user_time_scaled is up to date. | ||
268 | */ | 363 | */ |
269 | void account_process_tick(struct task_struct *tsk, int user_tick) | 364 | void account_process_tick(struct task_struct *tsk, int user_tick) |
270 | { | 365 | { |
271 | cputime_t utime, utimescaled; | 366 | cputime_t utime, utimescaled; |
272 | 367 | ||
273 | utime = get_paca()->user_time; | 368 | utime = get_paca()->user_time; |
369 | utimescaled = get_paca()->user_time_scaled; | ||
274 | get_paca()->user_time = 0; | 370 | get_paca()->user_time = 0; |
275 | utimescaled = cputime_to_scaled(utime); | 371 | get_paca()->user_time_scaled = 0; |
372 | get_paca()->utime_sspurr = 0; | ||
276 | account_user_time(tsk, utime, utimescaled); | 373 | account_user_time(tsk, utime, utimescaled); |
277 | } | 374 | } |
278 | 375 | ||
279 | /* | ||
280 | * Stuff for accounting stolen time. | ||
281 | */ | ||
282 | struct cpu_purr_data { | ||
283 | int initialized; /* thread is running */ | ||
284 | u64 tb; /* last TB value read */ | ||
285 | u64 purr; /* last PURR value read */ | ||
286 | u64 spurr; /* last SPURR value read */ | ||
287 | }; | ||
288 | |||
289 | /* | ||
290 | * Each entry in the cpu_purr_data array is manipulated only by its | ||
291 | * "owner" cpu -- usually in the timer interrupt but also occasionally | ||
292 | * in process context for cpu online. As long as cpus do not touch | ||
293 | * each others' cpu_purr_data, disabling local interrupts is | ||
294 | * sufficient to serialize accesses. | ||
295 | */ | ||
296 | static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data); | ||
297 | |||
298 | static void snapshot_tb_and_purr(void *data) | ||
299 | { | ||
300 | unsigned long flags; | ||
301 | struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data); | ||
302 | |||
303 | local_irq_save(flags); | ||
304 | p->tb = get_tb_or_rtc(); | ||
305 | p->purr = mfspr(SPRN_PURR); | ||
306 | wmb(); | ||
307 | p->initialized = 1; | ||
308 | local_irq_restore(flags); | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * Called during boot when all cpus have come up. | ||
313 | */ | ||
314 | void snapshot_timebases(void) | ||
315 | { | ||
316 | if (!cpu_has_feature(CPU_FTR_PURR)) | ||
317 | return; | ||
318 | on_each_cpu(snapshot_tb_and_purr, NULL, 1); | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * Must be called with interrupts disabled. | ||
323 | */ | ||
324 | void calculate_steal_time(void) | ||
325 | { | ||
326 | u64 tb, purr; | ||
327 | s64 stolen; | ||
328 | struct cpu_purr_data *pme; | ||
329 | |||
330 | pme = &__get_cpu_var(cpu_purr_data); | ||
331 | if (!pme->initialized) | ||
332 | return; /* !CPU_FTR_PURR or early in early boot */ | ||
333 | tb = mftb(); | ||
334 | purr = mfspr(SPRN_PURR); | ||
335 | stolen = (tb - pme->tb) - (purr - pme->purr); | ||
336 | if (stolen > 0) { | ||
337 | if (idle_task(smp_processor_id()) != current) | ||
338 | account_steal_time(stolen); | ||
339 | else | ||
340 | account_idle_time(stolen); | ||
341 | } | ||
342 | pme->tb = tb; | ||
343 | pme->purr = purr; | ||
344 | } | ||
345 | |||
346 | #ifdef CONFIG_PPC_SPLPAR | ||
347 | /* | ||
348 | * Must be called before the cpu is added to the online map when | ||
349 | * a cpu is being brought up at runtime. | ||
350 | */ | ||
351 | static void snapshot_purr(void) | ||
352 | { | ||
353 | struct cpu_purr_data *pme; | ||
354 | unsigned long flags; | ||
355 | |||
356 | if (!cpu_has_feature(CPU_FTR_PURR)) | ||
357 | return; | ||
358 | local_irq_save(flags); | ||
359 | pme = &__get_cpu_var(cpu_purr_data); | ||
360 | pme->tb = mftb(); | ||
361 | pme->purr = mfspr(SPRN_PURR); | ||
362 | pme->initialized = 1; | ||
363 | local_irq_restore(flags); | ||
364 | } | ||
365 | |||
366 | #endif /* CONFIG_PPC_SPLPAR */ | ||
367 | |||
368 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ | 376 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ |
369 | #define calc_cputime_factors() | 377 | #define calc_cputime_factors() |
370 | #define calculate_steal_time() do { } while (0) | ||
371 | #endif | 378 | #endif |
372 | 379 | ||
373 | #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)) | ||
374 | #define snapshot_purr() do { } while (0) | ||
375 | #endif | ||
376 | |||
377 | /* | ||
378 | * Called when a cpu comes up after the system has finished booting, | ||
379 | * i.e. as a result of a hotplug cpu action. | ||
380 | */ | ||
381 | void snapshot_timebase(void) | ||
382 | { | ||
383 | __get_cpu_var(last_jiffy) = get_tb_or_rtc(); | ||
384 | snapshot_purr(); | ||
385 | } | ||
386 | |||
387 | void __delay(unsigned long loops) | 380 | void __delay(unsigned long loops) |
388 | { | 381 | { |
389 | unsigned long start; | 382 | unsigned long start; |
@@ -585,8 +578,6 @@ void timer_interrupt(struct pt_regs * regs) | |||
585 | old_regs = set_irq_regs(regs); | 578 | old_regs = set_irq_regs(regs); |
586 | irq_enter(); | 579 | irq_enter(); |
587 | 580 | ||
588 | calculate_steal_time(); | ||
589 | |||
590 | if (test_irq_work_pending()) { | 581 | if (test_irq_work_pending()) { |
591 | clear_irq_work_pending(); | 582 | clear_irq_work_pending(); |
592 | irq_work_run(); | 583 | irq_work_run(); |
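For readers skimming the time.c hunks above: the removed calculate_steal_time() treated any timebase ticks that were not matched by PURR ticks as time stolen by the hypervisor or the sibling thread (stolen = delta_tb - delta_purr). A minimal user-space sketch of that arithmetic follows; it is an illustration only, with invented sample counter values standing in for mftb() and mfspr(SPRN_PURR) snapshots, not kernel code.

/* Toy model of the removed calculate_steal_time() arithmetic.
 * The tb/purr numbers below are made up; in the kernel they came
 * from per-CPU snapshots of the timebase and the PURR register.
 */
#include <stdio.h>
#include <stdint.h>

struct purr_snapshot {
	uint64_t tb;	/* timebase at last snapshot */
	uint64_t purr;	/* PURR at last snapshot */
};

/* Timebase delta minus PURR delta = ticks this thread did not get. */
static int64_t stolen_since(struct purr_snapshot *prev,
			    uint64_t tb, uint64_t purr)
{
	int64_t stolen = (int64_t)(tb - prev->tb) - (int64_t)(purr - prev->purr);

	prev->tb = tb;
	prev->purr = purr;
	return stolen;
}

int main(void)
{
	struct purr_snapshot snap = { .tb = 1000, .purr = 1000 };

	/* Timebase advanced by 500 ticks but PURR only by 350,
	 * so 150 ticks went elsewhere and would be accounted as stolen. */
	printf("stolen = %lld\n", (long long)stolen_since(&snap, 1500, 1350));
	return 0;
}

As the top of the hunk shows, the equivalent steal/idle bookkeeping now happens inside account_system_vtime(), which is why the explicit calculate_steal_time() call (and its per-CPU snapshot machinery) disappears from timer_interrupt().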
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index a45a63c3a0c..1b2cdc8eec9 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -538,6 +538,11 @@ int machine_check_e500(struct pt_regs *regs) | |||
538 | 538 | ||
539 | return 0; | 539 | return 0; |
540 | } | 540 | } |
541 | |||
542 | int machine_check_generic(struct pt_regs *regs) | ||
543 | { | ||
544 | return 0; | ||
545 | } | ||
541 | #elif defined(CONFIG_E200) | 546 | #elif defined(CONFIG_E200) |
542 | int machine_check_e200(struct pt_regs *regs) | 547 | int machine_check_e200(struct pt_regs *regs) |
543 | { | 548 | { |
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 13002fe206e..fd8728729ab 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -159,7 +159,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) | |||
159 | { | 159 | { |
160 | int i; | 160 | int i; |
161 | 161 | ||
162 | if (!vma || test_thread_flag(TIF_32BIT)) { | 162 | if (!vma || is_32bit_task()) { |
163 | printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase); | 163 | printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase); |
164 | for (i=0; i<vdso32_pages; i++) { | 164 | for (i=0; i<vdso32_pages; i++) { |
165 | struct page *pg = virt_to_page(vdso32_kbase + | 165 | struct page *pg = virt_to_page(vdso32_kbase + |
@@ -170,7 +170,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) | |||
170 | dump_one_vdso_page(pg, upg); | 170 | dump_one_vdso_page(pg, upg); |
171 | } | 171 | } |
172 | } | 172 | } |
173 | if (!vma || !test_thread_flag(TIF_32BIT)) { | 173 | if (!vma || !is_32bit_task()) { |
174 | printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase); | 174 | printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase); |
175 | for (i=0; i<vdso64_pages; i++) { | 175 | for (i=0; i<vdso64_pages; i++) { |
176 | struct page *pg = virt_to_page(vdso64_kbase + | 176 | struct page *pg = virt_to_page(vdso64_kbase + |
@@ -200,7 +200,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
200 | return 0; | 200 | return 0; |
201 | 201 | ||
202 | #ifdef CONFIG_PPC64 | 202 | #ifdef CONFIG_PPC64 |
203 | if (test_thread_flag(TIF_32BIT)) { | 203 | if (is_32bit_task()) { |
204 | vdso_pagelist = vdso32_pagelist; | 204 | vdso_pagelist = vdso32_pagelist; |
205 | vdso_pages = vdso32_pages; | 205 | vdso_pages = vdso32_pages; |
206 | vdso_base = VDSO32_MBASE; | 206 | vdso_base = VDSO32_MBASE; |
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 51ead52141b..9a7946c4173 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile | |||
@@ -14,10 +14,10 @@ obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) | |||
14 | 14 | ||
15 | GCOV_PROFILE := n | 15 | GCOV_PROFILE := n |
16 | 16 | ||
17 | EXTRA_CFLAGS := -shared -fno-common -fno-builtin | 17 | ccflags-y := -shared -fno-common -fno-builtin |
18 | EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ | 18 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ |
19 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | 19 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) |
20 | EXTRA_AFLAGS := -D__VDSO32__ -s | 20 | asflags-y := -D__VDSO32__ -s |
21 | 21 | ||
22 | obj-y += vdso32_wrapper.o | 22 | obj-y += vdso32_wrapper.o |
23 | extra-y += vdso32.lds | 23 | extra-y += vdso32.lds |
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 79da65d44a2..8c500d8622e 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile | |||
@@ -9,10 +9,10 @@ obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) | |||
9 | 9 | ||
10 | GCOV_PROFILE := n | 10 | GCOV_PROFILE := n |
11 | 11 | ||
12 | EXTRA_CFLAGS := -shared -fno-common -fno-builtin | 12 | ccflags-y := -shared -fno-common -fno-builtin |
13 | EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ | 13 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ |
14 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | 14 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) |
15 | EXTRA_AFLAGS := -D__VDSO64__ -s | 15 | asflags-y := -D__VDSO64__ -s |
16 | 16 | ||
17 | obj-y += vdso64_wrapper.o | 17 | obj-y += vdso64_wrapper.o |
18 | extra-y += vdso64.lds | 18 | extra-y += vdso64.lds |
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index fa3469ddaef..d692989a431 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -1184,7 +1184,12 @@ EXPORT_SYMBOL(vio_unregister_driver); | |||
1184 | /* vio_dev refcount hit 0 */ | 1184 | /* vio_dev refcount hit 0 */ |
1185 | static void __devinit vio_dev_release(struct device *dev) | 1185 | static void __devinit vio_dev_release(struct device *dev) |
1186 | { | 1186 | { |
1187 | /* XXX should free TCE table */ | 1187 | struct iommu_table *tbl = get_iommu_table_base(dev); |
1188 | |||
1189 | /* iSeries uses a common table for all vio devices */ | ||
1190 | if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl) | ||
1191 | iommu_free_table(tbl, dev->of_node ? | ||
1192 | dev->of_node->full_name : dev_name(dev)); | ||
1188 | of_node_put(dev->of_node); | 1193 | of_node_put(dev->of_node); |
1189 | kfree(to_vio_dev(dev)); | 1194 | kfree(to_vio_dev(dev)); |
1190 | } | 1195 | } |
@@ -1254,8 +1259,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) | |||
1254 | if (device_register(&viodev->dev)) { | 1259 | if (device_register(&viodev->dev)) { |
1255 | printk(KERN_ERR "%s: failed to register device %s\n", | 1260 | printk(KERN_ERR "%s: failed to register device %s\n", |
1256 | __func__, dev_name(&viodev->dev)); | 1261 | __func__, dev_name(&viodev->dev)); |
1257 | /* XXX free TCE table */ | 1262 | put_device(&viodev->dev); |
1258 | kfree(viodev); | ||
1259 | return NULL; | 1263 | return NULL; |
1260 | } | 1264 | } |
1261 | 1265 | ||
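On the vio.c hunk above: once device_register() has been attempted, the driver core may already hold a reference to the embedded struct device, so the error path must drop its reference with put_device() and let the release callback (vio_dev_release(), which now also frees the IOMMU table) do the freeing, instead of calling kfree() directly. Below is a rough, self-contained sketch of that refcounting rule; all types and function names are toys invented for the example, not the real driver-core API.

/* Toy illustration of the "put, don't kfree, after a failed register"
 * rule: a refcounted object is only freed from its release callback
 * when the last reference is dropped.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_device {
	int refcount;
	void (*release)(struct toy_device *dev);
};

static void toy_get(struct toy_device *dev)
{
	dev->refcount++;
}

static void toy_put(struct toy_device *dev)
{
	if (--dev->refcount == 0)
		dev->release(dev);	/* the only place memory is freed */
}

static void toy_release(struct toy_device *dev)
{
	printf("release callback frees the device\n");
	free(dev);
}

/* Pretend registration: the core briefly takes its own reference,
 * then drops it again and reports failure. */
static int toy_register(struct toy_device *dev)
{
	toy_get(dev);
	/* ... core bookkeeping would happen here ... */
	toy_put(dev);
	return -1;
}

int main(void)
{
	struct toy_device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->release = toy_release;
	toy_get(dev);			/* caller's initial reference */

	if (toy_register(dev) != 0)
		toy_put(dev);		/* drop our reference; never free() directly */

	return 0;
}

The same reasoning explains why the old "kfree(viodev)" line is dropped: freeing behind the refcount's back would double-free once the release callback also ran.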