-rw-r--r--  arch/powerpc/Kconfig                          |  3
-rw-r--r--  arch/powerpc/include/asm/mmu.h                | 10
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h  |  3
-rw-r--r--  arch/powerpc/kernel/idle_power7.S             |  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S       |  5
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c          | 12
-rw-r--r--  arch/powerpc/perf/core-book3s.c               | 26
-rw-r--r--  arch/powerpc/perf/power8-pmu.c                |  2
-rw-r--r--  arch/powerpc/platforms/cell/spu_syscalls.c    |  2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile    |  3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c  |  6
11 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bd6dd6ed3a9f..fefe7c8bf05f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -414,7 +414,7 @@ config KEXEC
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
 	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
-	select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
+	select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
 	help
 	  Build a kernel suitable for use as a kdump capture kernel.
 	  The same kernel binary can be used as production kernel and dump
@@ -1017,6 +1017,7 @@ endmenu
 if PPC64
 config RELOCATABLE
 	bool "Build a relocatable kernel"
+	depends on !COMPILE_TEST
 	select NONSTATIC_KERNEL
 	help
 	  This builds a kernel image that is capable of running anywhere
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index f8d1d6dcf7db..e61f24ed4e65 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -19,8 +19,7 @@
 #define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
 #define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
-#define MMU_FTR_TYPE_3E			ASM_CONST(0x00000020)
-#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000040)
+#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)
 
 /*
  * This is individual features
@@ -106,13 +105,6 @@
 			MMU_FTR_CI_LARGE_PAGE
 #define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
 			MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
-#define MMU_FTRS_A2		MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
-			MMU_FTR_USE_TLBIVAX_BCAST | \
-			MMU_FTR_LOCK_BCAST_INVAL | \
-			MMU_FTR_USE_TLBRSRV | \
-			MMU_FTR_USE_PAIRED_MAS | \
-			MMU_FTR_TLBIEL | \
-			MMU_FTR_16M_PAGE
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
 
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 9ed737146dbb..b3e936027b26 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -61,8 +61,7 @@ struct power_pmu {
 #define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
 #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER		0x00000040 /* Has SIER */
-#define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
-#define PPMU_EBB		0x00000100 /* supports event based branch */
+#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
 
 /*
  * Values for flags to get_alternatives()
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 2480256272d4..5cf3d367190d 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
 
 _GLOBAL(power7_sleep)
 	li	r3,1
-	li	r4,0
+	li	r4,1
 	b	power7_powersave_common
 	/* No return */
 
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 8c86422a1e37..731be7478b27 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -127,11 +127,6 @@ BEGIN_FTR_SECTION
 	stw	r10, HSTATE_PMC + 24(r13)
 	stw	r11, HSTATE_PMC + 28(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
-	mfspr	r9, SPRN_SIER
-	std	r8, HSTATE_MMCR + 40(r13)
-	std	r9, HSTATE_MMCR + 48(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 31:
 
 	/*
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index af3d78e19302..928ebe79668b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -410,17 +410,7 @@ void __init mmu_context_init(void)
 	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
 		first_context = 1;
 		last_context = 65535;
-	} else
-#ifdef CONFIG_PPC_BOOK3E_MMU
-	if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
-		u32 mmucfg = mfspr(SPRN_MMUCFG);
-		u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
-				>> MMUCFG_PIDSIZE_SHIFT;
-		first_context = 1;
-		last_context = (1UL << (pid_bits + 1)) - 1;
-	} else
-#endif
-	{
+	} else {
 		first_context = 1;
 		last_context = 255;
 	}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4520c9356b54..6b0641c3f03f 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event)
 	 * check that the PMU supports EBB, meaning those that don't can still
 	 * use bit 63 of the event code for something else if they wish.
 	 */
-	return (ppmu->flags & PPMU_EBB) &&
+	return (ppmu->flags & PPMU_ARCH_207S) &&
 	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
 }
 
@@ -777,7 +777,7 @@ void perf_event_print_debug(void)
 	if (ppmu->flags & PPMU_HAS_SIER)
 		sier = mfspr(SPRN_SIER);
 
-	if (ppmu->flags & PPMU_EBB) {
+	if (ppmu->flags & PPMU_ARCH_207S) {
 		pr_info("MMCR2: %016lx EBBHR: %016lx\n",
 			mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
 		pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
 	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	local64_add(delta, &event->count);
-	local64_sub(delta, &event->hw.period_left);
+
+	/*
+	 * A number of places program the PMC with (0x80000000 - period_left).
+	 * We never want period_left to be less than 1 because we will program
+	 * the PMC with a value >= 0x800000000 and an edge detected PMC will
+	 * roll around to 0 before taking an exception. We have seen this
+	 * on POWER8.
+	 *
+	 * To fix this, clamp the minimum value of period_left to 1.
+	 */
+	do {
+		prev = local64_read(&event->hw.period_left);
+		val = prev - delta;
+		if (val < 1)
+			val = 1;
+	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
 }
 
 /*
@@ -1300,6 +1315,9 @@ static void power_pmu_enable(struct pmu *pmu)
 
 	write_mmcr0(cpuhw, mmcr0);
 
+	if (ppmu->flags & PPMU_ARCH_207S)
+		mtspr(SPRN_MMCR2, 0);
+
 	/*
 	 * Enable instruction sampling if necessary
 	 */
@@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event)
 
 	if (has_branch_stack(event)) {
 		/* PMU has BHRB enabled */
-		if (!(ppmu->flags & PPMU_BHRB))
+		if (!(ppmu->flags & PPMU_ARCH_207S))
 			return -EOPNOTSUPP;
 	}
 
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index fe2763b6e039..639cd9156585 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = {
 	.get_constraint		= power8_get_constraint,
 	.get_alternatives	= power8_get_alternatives,
 	.disable_pmc		= power8_disable_pmc,
-	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
+	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
 	.n_generic		= ARRAY_SIZE(power8_generic_events),
 	.generic_events		= power8_generic_events,
 	.cache_events		= &power8_cache_events,
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 38e0a1a5cec3..5e6e0bad6db6 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -111,6 +111,7 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
 	return ret;
 }
 
+#ifdef CONFIG_COREDUMP
 int elf_coredump_extra_notes_size(void)
 {
 	struct spufs_calls *calls;
@@ -142,6 +143,7 @@ int elf_coredump_extra_notes_write(struct coredump_params *cprm)
 
 	return ret;
 }
+#endif
 
 void notify_spus_active(void)
 {
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index b9d5d678aa44..52a7d2596d30 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,8 +1,9 @@
 
 obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o coredump.o
+spufs-y += inode.o file.o context.o syscalls.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 spufs-y += switch.o fault.o lscsa_alloc.o
+spufs-$(CONFIG_COREDUMP) += coredump.o
 
 # magic for the trace events
 CFLAGS_sched.o := -I$(src)
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index b045fdda4845..a87200a535fa 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -79,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
 struct spufs_calls spufs_calls = {
 	.create_thread = do_spu_create,
 	.spu_run = do_spu_run,
-	.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
-	.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
 	.notify_spus_active = do_notify_spus_active,
 	.owner = THIS_MODULE,
+#ifdef CONFIG_COREDUMP
+	.coredump_extra_notes_size = spufs_coredump_extra_notes_size,
+	.coredump_extra_notes_write = spufs_coredump_extra_notes_write,
+#endif
 };