author    Russell King <rmk+kernel@arm.linux.org.uk>  2012-12-11 05:01:53 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2012-12-11 05:01:53 -0500
commit    0fa5d3996dbda1ee9653c43d39b7ef159fb57ee7 (patch)
tree      70f0adc3b86bb1511be6607c959506f6365fc2a9
parent    0b99cb73105f0527c1c4096960796b8772343a39 (diff)
parent    14318efb322e2fe1a034c69463d725209eb9d548 (diff)
Merge branch 'devel-stable' into for-linus
-rw-r--r--  Documentation/devicetree/bindings/arm/cpus.txt  77
-rw-r--r--  arch/arm/common/gic.c  45
-rw-r--r--  arch/arm/include/asm/Kbuild  1
-rw-r--r--  arch/arm/include/asm/cpu.h  1
-rw-r--r--  arch/arm/include/asm/cputype.h  13
-rw-r--r--  arch/arm/include/asm/cti.h  20
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h  8
-rw-r--r--  arch/arm/include/asm/mmu.h  13
-rw-r--r--  arch/arm/include/asm/mmu_context.h  88
-rw-r--r--  arch/arm/include/asm/percpu.h  45
-rw-r--r--  arch/arm/include/asm/perf_event.h  7
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h  2
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h  4
-rw-r--r--  arch/arm/include/asm/pgtable.h  10
-rw-r--r--  arch/arm/include/asm/pmu.h  28
-rw-r--r--  arch/arm/include/asm/prom.h  2
-rw-r--r--  arch/arm/include/asm/smp_plat.h  17
-rw-r--r--  arch/arm/kernel/devtree.c  104
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c  154
-rw-r--r--  arch/arm/kernel/perf_event.c  85
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c  74
-rw-r--r--  arch/arm/kernel/perf_event_v6.c  126
-rw-r--r--  arch/arm/kernel/perf_event_v7.c  246
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  157
-rw-r--r--  arch/arm/kernel/setup.c  84
-rw-r--r--  arch/arm/kernel/smp.c  5
-rw-r--r--  arch/arm/kernel/topology.c  42
-rw-r--r--  arch/arm/mach-omap2/pmu.c  2
-rw-r--r--  arch/arm/mm/context.c  207
-rw-r--r--  arch/arm/mm/ioremap.c  16
-rw-r--r--  arch/arm/mm/mmu.c  2
-rw-r--r--  arch/arm/mm/proc-macros.S  4
-rw-r--r--  arch/arm/mm/proc-v7-2level.S  10
-rw-r--r--  arch/arm/mm/proc-v7-3level.S  5
34 files changed, 981 insertions(+), 723 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
new file mode 100644
index 00000000000..f32494dbfe1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -0,0 +1,77 @@
+* ARM CPUs binding description
+
+The device tree allows to describe the layout of CPUs in a system through
+the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
+defining properties for every cpu.
+
+Bindings for CPU nodes follow the ePAPR standard, available from:
+
+http://devicetree.org
+
+For the ARM architecture every CPU node must contain the following properties:
+
+- device_type: must be "cpu"
+- reg:         property matching the CPU MPIDR[23:0] register bits
+               reg[31:24] bits must be set to 0
+- compatible:  should be one of:
+               "arm,arm1020"
+               "arm,arm1020e"
+               "arm,arm1022"
+               "arm,arm1026"
+               "arm,arm720"
+               "arm,arm740"
+               "arm,arm7tdmi"
+               "arm,arm920"
+               "arm,arm922"
+               "arm,arm925"
+               "arm,arm926"
+               "arm,arm940"
+               "arm,arm946"
+               "arm,arm9tdmi"
+               "arm,cortex-a5"
+               "arm,cortex-a7"
+               "arm,cortex-a8"
+               "arm,cortex-a9"
+               "arm,cortex-a15"
+               "arm,arm1136"
+               "arm,arm1156"
+               "arm,arm1176"
+               "arm,arm11mpcore"
+               "faraday,fa526"
+               "intel,sa110"
+               "intel,sa1100"
+               "marvell,feroceon"
+               "marvell,mohawk"
+               "marvell,xsc3"
+               "marvell,xscale"
+
+Example:
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <1>;
+
+        CPU0: cpu@0 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x0>;
+        };
+
+        CPU1: cpu@1 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a15";
+            reg = <0x1>;
+        };
+
+        CPU2: cpu@100 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x100>;
+        };
+
+        CPU3: cpu@101 {
+            device_type = "cpu";
+            compatible = "arm,cortex-a7";
+            reg = <0x101>;
+        };
+    };
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index aa526998418..36ae03a3f5d 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -70,6 +70,14 @@ struct gic_chip_data {
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
 /*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering. Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+/*
  * Supported arch specific GIC irq extension.
  * Default make them NULL.
  */
@@ -238,11 +246,11 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
     unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
     u32 val, mask, bit;
 
-    if (cpu >= 8 || cpu >= nr_cpu_ids)
+    if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
         return -EINVAL;
 
     mask = 0xff << shift;
-    bit = 1 << (cpu_logical_map(cpu) + shift);
+    bit = gic_cpu_map[cpu] << shift;
 
     raw_spin_lock(&irq_controller_lock);
     val = readl_relaxed(reg) & ~mask;
@@ -349,11 +357,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
     u32 cpumask;
     unsigned int gic_irqs = gic->gic_irqs;
     void __iomem *base = gic_data_dist_base(gic);
-    u32 cpu = cpu_logical_map(smp_processor_id());
-
-    cpumask = 1 << cpu;
-    cpumask |= cpumask << 8;
-    cpumask |= cpumask << 16;
 
     writel_relaxed(0, base + GIC_DIST_CTRL);
 
@@ -366,6 +369,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
     /*
      * Set all global interrupts to this CPU only.
      */
+    cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
     for (i = 32; i < gic_irqs; i += 4)
         writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -389,9 +393,25 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
     void __iomem *dist_base = gic_data_dist_base(gic);
     void __iomem *base = gic_data_cpu_base(gic);
+    unsigned int cpu_mask, cpu = smp_processor_id();
     int i;
 
     /*
+     * Get what the GIC says our CPU mask is.
+     */
+    BUG_ON(cpu >= NR_GIC_CPU_IF);
+    cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+    gic_cpu_map[cpu] = cpu_mask;
+
+    /*
+     * Clear our mask from the other map entries in case they're
+     * still undefined.
+     */
+    for (i = 0; i < NR_GIC_CPU_IF; i++)
+        if (i != cpu)
+            gic_cpu_map[i] &= ~cpu_mask;
+
+    /*
      * Deal with the banked PPI and SGI interrupts - disable all
      * PPI interrupts, ensure all SGI interrupts are enabled.
      */
@@ -646,7 +666,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 {
     irq_hw_number_t hwirq_base;
     struct gic_chip_data *gic;
-    int gic_irqs, irq_base;
+    int gic_irqs, irq_base, i;
 
     BUG_ON(gic_nr >= MAX_GIC_NR);
 
@@ -683,6 +703,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
     }
 
     /*
+     * Initialize the CPU interface map to all CPUs.
+     * It will be refined as each CPU probes its ID.
+     */
+    for (i = 0; i < NR_GIC_CPU_IF; i++)
+        gic_cpu_map[i] = 0xff;
+
+    /*
      * For primary GICs, skip over SGIs.
      * For secondary GICs, skip over PPIs, too.
      */
@@ -737,7 +764,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
     /* Convert our logical CPU mask into a physical one. */
     for_each_cpu(cpu, mask)
-        map |= 1 << cpu_logical_map(cpu);
+        map |= gic_cpu_map[cpu];
 
     /*
      * Ensure that stores to Normal memory are visible to the
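
An aside on the gic_cpu_map change above: the old code assumed that logical CPU n
always owns GIC CPU-interface bit n, which the hardware does not guarantee. Below
is a minimal, freestanding C sketch of the idea (illustrative only, not part of
the patch; sgi_target_mask() and the fixed-size array are stand-ins, not the
kernel's API):

#include <stdint.h>

#define NR_GIC_CPU_IF 8

/* Each entry holds the GIC CPU-interface bit(s) for one logical CPU,
 * as read back from GIC_DIST_TARGET during that CPU's gic_cpu_init(). */
static uint8_t gic_cpu_map[NR_GIC_CPU_IF];

/* Convert a logical-CPU bitmask into the hardware target mask for an SGI. */
static uint32_t sgi_target_mask(uint32_t logical_mask)
{
    uint32_t map = 0;
    int cpu;

    for (cpu = 0; cpu < NR_GIC_CPU_IF; cpu++)
        if (logical_mask & (1u << cpu))
            map |= gic_cpu_map[cpu];    /* hardware mask, not 1 << cpu */
    return map;
}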
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index f70ae175a3d..2ffdaacd461 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local64.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
-generic-y += percpu.h
 generic-y += poll.h
 generic-y += resource.h
 generic-y += sections.h
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index d797223b39d..2744f060255 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -15,6 +15,7 @@
 
 struct cpuinfo_arm {
     struct cpu cpu;
+    u32 cpuid;
 #ifdef CONFIG_SMP
     unsigned int loops_per_jiffy;
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb47d28cbe1..a59dcb5ab5f 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -25,6 +25,19 @@
 #define CPUID_EXT_ISAR4 "c2, 4"
 #define CPUID_EXT_ISAR5 "c2, 5"
 
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+#define MPIDR_HWID_BITMASK 0xFFFFFF
+
+#define MPIDR_LEVEL_BITS 8
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+    ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
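
For readers unfamiliar with the MPIDR layout, here is a small standalone example
of the new affinity-level accessor in use. Only the macros come from the hunk
above; the main() harness and the printed labels are illustrative assumptions:

#include <stdio.h>
#include <stdint.h>

#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
    ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)

int main(void)
{
    uint32_t mpidr = 0x101; /* the CPU3 node from the binding example above */

    /* Aff0: CPU within cluster; Aff1: cluster; Aff2: higher-level group. */
    printf("aff0=%u aff1=%u aff2=%u\n",
           (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 0),
           (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 1),
           (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 2));
    return 0;   /* prints: aff0=1 aff1=1 aff2=0 */
}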
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index a0ada3ea435..f2e5cad3f30 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-    void __iomem *base = cti->base;
-    unsigned long val;
-
-    val = __raw_readl(base + LOCKSTATUS);
-
-    if (val & 1) {
-        val = LOCKCODE;
-        __raw_writel(val, base + LOCKACCESS);
-    }
+    __raw_writel(LOCKCODE, cti->base + LOCKACCESS);
 }
 
 /**
@@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-    void __iomem *base = cti->base;
-    unsigned long val;
-
-    val = __raw_readl(base + LOCKSTATUS);
-
-    if (!(val & 1)) {
-        val = ~LOCKCODE;
-        __raw_writel(val, base + LOCKACCESS);
-    }
+    __raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
 }
 #endif
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index c190bc992f0..01169dd723f 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_BASE_WCR 112
 
 /* Accessor macros for the debug registers. */
-#define ARM_DBG_READ(M, OP2, VAL) do {\
-    asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
+#define ARM_DBG_READ(N, M, OP2, VAL) do {\
+    asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
 } while (0)
 
-#define ARM_DBG_WRITE(M, OP2, VAL) do {\
-    asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
+#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
+    asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)
 
 struct notifier_block;
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 14965658a92..9f77e7804f3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,18 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-    unsigned int id;
-    raw_spinlock_t id_lock;
+    u64 id;
 #endif
-    unsigned int kvm_seq;
+    unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
-#define ASID(mm) ((mm)->context.id & 255)
-
-/* init_mm.context.id_lock should be initialized. */
-#define INIT_MM_CONTEXT(name) \
-    .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+#define ASID_BITS 8
+#define ASID_MASK ((~0ULL) << ASID_BITS)
+#define ASID(mm) ((mm)->context.id & ~ASID_MASK)
 #else
 #define ASID(mm) (0)
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0..e1f644bc7cc 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,88 +20,12 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |             context ID              |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS 8
-#define ASID_MASK ((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION (1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-    unsigned long flags;
-
-    __new_context(mm);
-
-    local_irq_save(flags);
-    cpu_switch_mm(mm->pgd, mm);
-    local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-                                            struct task_struct *tsk)
-{
-    if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-        __check_kvm_seq(mm);
-
-    /*
-     * Required during context switch to avoid speculative page table
-     * walking with the wrong TTBR.
-     */
-    cpu_set_reserved_ttbr0();
-
-    if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-        /*
-         * The ASID is from the current generation, just switch to the
-         * new pgd. This condition is only true for calls from
-         * context_switch() and interrupts are already disabled.
-         */
-        cpu_switch_mm(mm->pgd, mm);
-    else if (irqs_disabled())
-        /*
-         * Defer the new ASID allocation until after the context
-         * switch critical region since __new_context() cannot be
-         * called with interrupts disabled (it sends IPIs).
-         */
-        set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-    else
-        /*
-         * That is a direct call to switch_mm() or activate_mm() with
-         * interrupts enabled and a new context.
-         */
-        switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-    finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-    if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-        switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm) ({ mm->context.id = 0; })
 
 #else /* !CONFIG_CPU_HAS_ASID */
 
@@ -110,8 +34,8 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void check_and_switch_context(struct mm_struct *mm,
                                             struct task_struct *tsk)
 {
-    if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-        __check_kvm_seq(mm);
+    if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+        __check_vmalloc_seq(mm);
 
     if (irqs_disabled())
         /*
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif /* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm) do { } while(0)
+#define activate_mm(prev,next) switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm) do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, NULL)
 
 #endif
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
new file mode 100644
index 00000000000..968c0a14e0a
--- /dev/null
+++ b/arch/arm/include/asm/percpu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_ARM_PERCPU_H_
+#define _ASM_ARM_PERCPU_H_
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
+ */
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+static inline void set_my_cpu_offset(unsigned long off)
+{
+    /* Set TPIDRPRW */
+    asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+    unsigned long off;
+    /* Read TPIDRPRW */
+    asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+    return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+#define set_my_cpu_offset(x) do {} while(0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_ARM_PERCPU_H_ */
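
The idea behind this new header, sketched in plain C for illustration (the
percpu_area struct, __per_cpu_base symbol and this_cpu_area() helper are
hypothetical; only the MRC/MCR encodings come from the patch, and this builds
only for ARMv6K/v7 targets): each CPU stashes its per-CPU area offset in
TPIDRPRW once at boot, so the hot path becomes a single register read instead
of an smp_processor_id() lookup plus array indexing.

struct percpu_area { unsigned long counter; }; /* hypothetical per-CPU data */
extern char __per_cpu_base[];                  /* hypothetical area base */

static inline void set_my_cpu_offset(unsigned long off)
{
    /* Write TPIDRPRW; called once per CPU during early bring-up. */
    asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

static inline unsigned long my_cpu_offset(void)
{
    unsigned long off;
    /* Read TPIDRPRW back; one instruction on the hot path. */
    asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
    return off;
}

/* Boot (per CPU): set_my_cpu_offset(cpu * sizeof(struct percpu_area)); */
static inline struct percpu_area *this_cpu_area(void)
{
    return (struct percpu_area *)(__per_cpu_base + my_cpu_offset());
}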
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 625cd621a43..755877527cf 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -21,4 +21,11 @@
 #define C(_x) PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED 0xFFFF
 
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
+
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 2317a71c8f8..f97ee02386e 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -115,6 +115,7 @@
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
+#define L_PTE_VALID   (_AT(pteval_t, 1) << 0)  /* Valid */
 #define L_PTE_PRESENT (_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG   (_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE    (_AT(pteval_t, 1) << 2)  /* only when !PRESENT */
@@ -123,6 +124,7 @@
 #define L_PTE_USER    (_AT(pteval_t, 1) << 8)
 #define L_PTE_XN      (_AT(pteval_t, 1) << 9)
 #define L_PTE_SHARED  (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
+#define L_PTE_NONE    (_AT(pteval_t, 1) << 11)
 
 /*
  * These are the memory types, defined to be compatible with
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index b24903549d1..a3f37929940 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -67,7 +67,8 @@
  * These bits overlap with the hardware bits but the naming is preserved for
  * consistency with the classic page table format.
  */
-#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0)  /* Valid */
+#define L_PTE_VALID   (_AT(pteval_t, 1) << 0)  /* Valid */
+#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0)  /* Present */
 #define L_PTE_FILE    (_AT(pteval_t, 1) << 2)  /* only when !PRESENT */
 #define L_PTE_USER    (_AT(pteval_t, 1) << 6)  /* AP[1] */
 #define L_PTE_RDONLY  (_AT(pteval_t, 1) << 7)  /* AP[2] */
@@ -76,6 +77,7 @@
 #define L_PTE_XN      (_AT(pteval_t, 1) << 54) /* XN */
 #define L_PTE_DIRTY   (_AT(pteval_t, 1) << 55) /* unused */
 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
+#define L_PTE_NONE    (_AT(pteval_t, 1) << 57) /* PROT_NONE */
 
 /*
  * To be used in assembly code with the upper page attributes.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 08c12312a1f..9c82f988c0e 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -73,7 +73,7 @@ extern pgprot_t pgprot_kernel;
 
 #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE        _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_NONE        _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
 #define PAGE_SHARED      _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
 #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
 #define PAGE_COPY        _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -83,7 +83,7 @@ extern pgprot_t pgprot_kernel;
 #define PAGE_KERNEL      _MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC pgprot_kernel
 
-#define __PAGE_NONE        __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_NONE        __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED      __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
 #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
 #define __PAGE_COPY        __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_exec(pte)    (!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte) (0)
 
-#define pte_present_user(pte) \
-    ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-     (L_PTE_PRESENT | L_PTE_USER))
+#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -242,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-    const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
+    const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
     pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
     return pte;
 }
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index a26170dce02..f24edad26c7 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -67,19 +67,19 @@ struct arm_pmu {
     cpumask_t   active_irqs;
     char        *name;
     irqreturn_t (*handle_irq)(int irq_num, void *dev);
-    void        (*enable)(struct hw_perf_event *evt, int idx);
-    void        (*disable)(struct hw_perf_event *evt, int idx);
+    void        (*enable)(struct perf_event *event);
+    void        (*disable)(struct perf_event *event);
     int         (*get_event_idx)(struct pmu_hw_events *hw_events,
-                                 struct hw_perf_event *hwc);
+                                 struct perf_event *event);
     int         (*set_event_filter)(struct hw_perf_event *evt,
                                     struct perf_event_attr *attr);
-    u32         (*read_counter)(int idx);
-    void        (*write_counter)(int idx, u32 val);
-    void        (*start)(void);
-    void        (*stop)(void);
+    u32         (*read_counter)(struct perf_event *event);
+    void        (*write_counter)(struct perf_event *event, u32 val);
+    void        (*start)(struct arm_pmu *);
+    void        (*stop)(struct arm_pmu *);
     void        (*reset)(void *);
-    int         (*request_irq)(irq_handler_t handler);
-    void        (*free_irq)(void);
+    int         (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+    void        (*free_irq)(struct arm_pmu *);
     int         (*map_event)(struct perf_event *event);
     int         num_events;
     atomic_t    active_events;
@@ -93,15 +93,11 @@ struct arm_pmu {
 
 extern const struct dev_pm_ops armpmu_dev_pm_ops;
 
-int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+int armpmu_register(struct arm_pmu *armpmu, int type);
 
-u64 armpmu_event_update(struct perf_event *event,
-                        struct hw_perf_event *hwc,
-                        int idx);
+u64 armpmu_event_update(struct perf_event *event);
 
-int armpmu_event_set_period(struct perf_event *event,
-                            struct hw_perf_event *hwc,
-                            int idx);
+int armpmu_event_set_period(struct perf_event *event);
 
 int armpmu_map_event(struct perf_event *event,
                      const unsigned (*event_map)[PERF_COUNT_HW_MAX],
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index aeae9c609df..8dd51dc1a36 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -15,6 +15,7 @@
 
 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
+extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
@@ -24,6 +25,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 }
 
 static inline void arm_dt_memblock_reserve(void) { }
+static inline void arm_dt_init_cpu_maps(void) { }
 
 #endif /* CONFIG_OF */
 #endif /* ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 558d6c80aca..aaa61b6f50f 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -5,6 +5,9 @@
 #ifndef __ASMARM_SMP_PLAT_H
 #define __ASMARM_SMP_PLAT_H
 
+#include <linux/cpumask.h>
+#include <linux/err.h>
+
 #include <asm/cputype.h>
 
 /*
@@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void)
  */
 extern int __cpu_logical_map[];
 #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ *  - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+    int cpu;
+    for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+        if (cpu_logical_map(cpu) == mpidr)
+            return cpu;
+    return -EINVAL;
+}
 
 #endif
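
A hedged userspace mock of the MPIDR-to-logical-CPU lookup added above, using
the four-CPU map implied by the DT binding example earlier in this patch
(NR_CPUS and the table contents are assumptions for the demo, not kernel code):

#include <stdio.h>

#define NR_CPUS 4

/* Mirrors the map arm_dt_init_cpu_maps() would build for the DT example:
 * boot CPU (MPIDR 0x0) at index 0, then 0x1, 0x100, 0x101. */
static unsigned int cpu_logical_map[NR_CPUS] = { 0x0, 0x1, 0x100, 0x101 };

static int get_logical_index(unsigned int mpidr)
{
    int cpu;

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (cpu_logical_map[cpu] == mpidr)
            return cpu;
    return -1;  /* stands in for -EINVAL */
}

int main(void)
{
    /* The A7 at MPIDR 0x101 got logical index 3 in the example above. */
    printf("MPIDR 0x101 -> logical cpu %d\n", get_logical_index(0x101));
    return 0;
}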
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index bee7f9d47f0..70f1bdeb241 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -19,8 +19,10 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
+#include <asm/cputype.h>
 #include <asm/setup.h>
 #include <asm/page.h>
+#include <asm/smp_plat.h>
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
@@ -61,6 +63,108 @@ void __init arm_dt_memblock_reserve(void)
     }
 }
 
+/*
+ * arm_dt_init_cpu_maps - Function retrieves cpu nodes from the device tree
+ * and builds the cpu logical map array containing MPIDR values related to
+ * logical cpus
+ *
+ * Updates the cpu possible mask with the number of parsed cpu nodes
+ */
+void __init arm_dt_init_cpu_maps(void)
+{
+    /*
+     * Temp logical map is initialized with UINT_MAX values that are
+     * considered invalid logical map entries since the logical map must
+     * contain a list of MPIDR[23:0] values where MPIDR[31:24] must
+     * read as 0.
+     */
+    struct device_node *cpu, *cpus;
+    u32 i, j, cpuidx = 1;
+    u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
+
+    u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
+    bool bootcpu_valid = false;
+    cpus = of_find_node_by_path("/cpus");
+
+    if (!cpus)
+        return;
+
+    for_each_child_of_node(cpus, cpu) {
+        u32 hwid;
+
+        pr_debug(" * %s...\n", cpu->full_name);
+        /*
+         * A device tree containing CPU nodes with missing "reg"
+         * properties is considered invalid to build the
+         * cpu_logical_map.
+         */
+        if (of_property_read_u32(cpu, "reg", &hwid)) {
+            pr_debug(" * %s missing reg property\n",
+                     cpu->full_name);
+            return;
+        }
+
+        /*
+         * 8 MSBs must be set to 0 in the DT since the reg property
+         * defines the MPIDR[23:0].
+         */
+        if (hwid & ~MPIDR_HWID_BITMASK)
+            return;
+
+        /*
+         * Duplicate MPIDRs are a recipe for disaster.
+         * Scan all initialized entries and check for
+         * duplicates. If any is found just bail out.
+         * temp values were initialized to UINT_MAX
+         * to avoid matching valid MPIDR[23:0] values.
+         */
+        for (j = 0; j < cpuidx; j++)
+            if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
+                                         "properties in the DT\n"))
+                return;
+
+        /*
+         * Build a stashed array of MPIDR values. Numbering scheme
+         * requires that if detected the boot CPU must be assigned
+         * logical id 0. Other CPUs get sequential indexes starting
+         * from 1. If a CPU node with a reg property matching the
+         * boot CPU MPIDR is detected, this is recorded so that the
+         * logical map built from DT is validated and can be used
+         * to override the map created in smp_setup_processor_id().
+         */
+        if (hwid == mpidr) {
+            i = 0;
+            bootcpu_valid = true;
+        } else {
+            i = cpuidx++;
+        }
+
+        if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than "
+                                      "max cores %u, capping them\n",
+                                      cpuidx, nr_cpu_ids)) {
+            cpuidx = nr_cpu_ids;
+            break;
+        }
+
+        tmp_map[i] = hwid;
+    }
+
+    if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], "
+                             "fall back to default cpu_logical_map\n"))
+        return;
+
+    /*
+     * Since the boot CPU node contains proper data, and all nodes have
+     * a reg property, the DT CPU list can be considered valid and the
+     * logical map created in smp_setup_processor_id() can be overridden
+     */
+    for (i = 0; i < cpuidx; i++) {
+        set_cpu_possible(i, true);
+        cpu_logical_map(i) = tmp_map[i];
+        pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i));
+    }
+}
+
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
  * @dt_phys: physical address of dt blob
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 281bf330124..5ff2e77782b 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -52,14 +52,14 @@ static u8 debug_arch;
 /* Maximum supported watchpoint length. */
 static u8 max_watchpoint_len;
 
 #define READ_WB_REG_CASE(OP2, M, VAL) \
     case ((OP2 << 4) + M): \
-        ARM_DBG_READ(c ## M, OP2, VAL); \
+        ARM_DBG_READ(c0, c ## M, OP2, VAL); \
         break
 
 #define WRITE_WB_REG_CASE(OP2, M, VAL) \
     case ((OP2 << 4) + M): \
-        ARM_DBG_WRITE(c ## M, OP2, VAL);\
+        ARM_DBG_WRITE(c0, c ## M, OP2, VAL); \
         break
 
 #define GEN_READ_WB_REG_CASES(OP2, VAL) \
@@ -136,12 +136,12 @@ static u8 get_debug_arch(void)
 
     /* Do we implement the extended CPUID interface? */
     if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-        pr_warning("CPUID feature registers not supported. "
-                   "Assuming v6 debug is present.\n");
+        pr_warn_once("CPUID feature registers not supported. "
+                     "Assuming v6 debug is present.\n");
         return ARM_DEBUG_ARCH_V6;
     }
 
-    ARM_DBG_READ(c0, 0, didr);
+    ARM_DBG_READ(c0, c0, 0, didr);
     return (didr >> 16) & 0xf;
 }
 
@@ -169,7 +169,7 @@ static int debug_exception_updates_fsr(void)
 static int get_num_wrp_resources(void)
 {
     u32 didr;
-    ARM_DBG_READ(c0, 0, didr);
+    ARM_DBG_READ(c0, c0, 0, didr);
     return ((didr >> 28) & 0xf) + 1;
 }
 
@@ -177,7 +177,7 @@ static int get_num_wrp_resources(void)
 static int get_num_brp_resources(void)
 {
     u32 didr;
-    ARM_DBG_READ(c0, 0, didr);
+    ARM_DBG_READ(c0, c0, 0, didr);
     return ((didr >> 24) & 0xf) + 1;
 }
 
@@ -228,19 +228,17 @@ static int get_num_brps(void)
  * be put into halting debug mode at any time by an external debugger
  * but there is nothing we can do to prevent that.
  */
-static int enable_monitor_mode(void)
+static int monitor_mode_enabled(void)
 {
     u32 dscr;
-    int ret = 0;
-
-    ARM_DBG_READ(c1, 0, dscr);
+    ARM_DBG_READ(c0, c1, 0, dscr);
+    return !!(dscr & ARM_DSCR_MDBGEN);
+}
 
-    /* Ensure that halting mode is disabled. */
-    if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
-        "halting debug mode enabled. Unable to access hardware resources.\n")) {
-        ret = -EPERM;
-        goto out;
-    }
+static int enable_monitor_mode(void)
+{
+    u32 dscr;
+    ARM_DBG_READ(c0, c1, 0, dscr);
 
     /* If monitor mode is already enabled, just return. */
     if (dscr & ARM_DSCR_MDBGEN)
@@ -250,24 +248,27 @@ static int enable_monitor_mode(void)
     switch (get_debug_arch()) {
     case ARM_DEBUG_ARCH_V6:
     case ARM_DEBUG_ARCH_V6_1:
-        ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
+        ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN));
         break;
     case ARM_DEBUG_ARCH_V7_ECP14:
     case ARM_DEBUG_ARCH_V7_1:
-        ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
+        ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
+        isb();
         break;
     default:
-        ret = -ENODEV;
-        goto out;
+        return -ENODEV;
     }
 
     /* Check that the write made it through. */
-    ARM_DBG_READ(c1, 0, dscr);
-    if (!(dscr & ARM_DSCR_MDBGEN))
-        ret = -EPERM;
+    ARM_DBG_READ(c0, c1, 0, dscr);
+    if (!(dscr & ARM_DSCR_MDBGEN)) {
+        pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
+                     smp_processor_id());
+        return -EPERM;
+    }
 
 out:
-    return ret;
+    return 0;
 }
 
 int hw_breakpoint_slots(int type)
@@ -328,14 +329,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 {
     struct arch_hw_breakpoint *info = counter_arch_bp(bp);
     struct perf_event **slot, **slots;
-    int i, max_slots, ctrl_base, val_base, ret = 0;
+    int i, max_slots, ctrl_base, val_base;
     u32 addr, ctrl;
 
-    /* Ensure that we are in monitor mode and halting mode is disabled. */
-    ret = enable_monitor_mode();
-    if (ret)
-        goto out;
-
     addr = info->address;
     ctrl = encode_ctrl_reg(info->ctrl) | 0x1;
 
@@ -362,9 +358,9 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
         }
     }
 
-    if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
-        ret = -EBUSY;
-        goto out;
+    if (i == max_slots) {
+        pr_warning("Can't find any breakpoint slot\n");
+        return -EBUSY;
     }
 
     /* Override the breakpoint data with the step data. */
@@ -383,9 +379,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 
     /* Setup the control register. */
     write_wb_reg(ctrl_base + i, ctrl);
-
-out:
-    return ret;
+    return 0;
 }
 
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
@@ -416,8 +410,10 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
         }
     }
 
-    if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
+    if (i == max_slots) {
+        pr_warning("Can't find any breakpoint slot\n");
         return;
+    }
 
     /* Ensure that we disable the mismatch breakpoint. */
     if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
@@ -596,6 +592,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
     int ret = 0;
     u32 offset, alignment_mask = 0x3;
 
+    /* Ensure that we are in monitor debug mode. */
+    if (!monitor_mode_enabled())
+        return -ENODEV;
+
     /* Build the arch_hw_breakpoint. */
     ret = arch_build_bp_info(bp);
     if (ret)
@@ -858,7 +858,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
     local_irq_enable();
 
     /* We only handle watchpoints and hardware breakpoints. */
-    ARM_DBG_READ(c1, 0, dscr);
+    ARM_DBG_READ(c0, c1, 0, dscr);
 
     /* Perform perf callbacks. */
     switch (ARM_DSCR_MOE(dscr)) {
@@ -906,7 +906,7 @@ static struct undef_hook debug_reg_hook = {
 static void reset_ctrl_regs(void *unused)
 {
     int i, raw_num_brps, err = 0, cpu = smp_processor_id();
-    u32 dbg_power;
+    u32 val;
 
     /*
      * v7 debug contains save and restore registers so that debug state
@@ -919,23 +919,30 @@ static void reset_ctrl_regs(void *unused)
     switch (debug_arch) {
     case ARM_DEBUG_ARCH_V6:
     case ARM_DEBUG_ARCH_V6_1:
-        /* ARMv6 cores just need to reset the registers. */
-        goto reset_regs;
+        /* ARMv6 cores clear the registers out of reset. */
+        goto out_mdbgen;
     case ARM_DEBUG_ARCH_V7_ECP14:
         /*
         * Ensure sticky power-down is clear (i.e. debug logic is
         * powered up).
         */
-        asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
-        if ((dbg_power & 0x1) == 0)
+        ARM_DBG_READ(c1, c5, 4, val);
+        if ((val & 0x1) == 0)
             err = -EPERM;
+
+        /*
+         * Check whether we implement OS save and restore.
+         */
+        ARM_DBG_READ(c1, c1, 4, val);
+        if ((val & 0x9) == 0)
+            goto clear_vcr;
         break;
     case ARM_DEBUG_ARCH_V7_1:
         /*
         * Ensure the OS double lock is clear.
        */
-        asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
-        if ((dbg_power & 0x1) == 1)
+        ARM_DBG_READ(c1, c3, 4, val);
+        if ((val & 0x1) == 1)
            err = -EPERM;
         break;
     }
@@ -947,24 +954,29 @@ static void reset_ctrl_regs(void *unused)
     }
 
     /*
-     * Unconditionally clear the lock by writing a value
+     * Unconditionally clear the OS lock by writing a value
      * other than 0xC5ACCE55 to the access register.
     */
-    asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+    ARM_DBG_WRITE(c1, c0, 4, 0);
     isb();
 
     /*
     * Clear any configured vector-catch events before
    * enabling monitor mode.
    */
-    asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+clear_vcr:
+    ARM_DBG_WRITE(c0, c7, 0, 0);
     isb();
 
-reset_regs:
-    if (enable_monitor_mode())
+    if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
+        pr_warning("CPU %d failed to disable vector catch\n", cpu);
         return;
+    }
 
-    /* We must also reset any reserved registers. */
+    /*
+     * The control/value register pairs are UNKNOWN out of reset so
+     * clear them to avoid spurious debug events.
+     */
     raw_num_brps = get_num_brp_resources();
     for (i = 0; i < raw_num_brps; ++i) {
         write_wb_reg(ARM_BASE_BCR + i, 0UL);
@@ -975,6 +987,19 @@ reset_regs:
         write_wb_reg(ARM_BASE_WCR + i, 0UL);
         write_wb_reg(ARM_BASE_WVR + i, 0UL);
     }
+
+    if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
+        pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+        return;
+    }
+
+    /*
+     * Have a crack at enabling monitor mode. We don't actually need
+     * it yet, but reporting an error early is useful if it fails.
+     */
+out_mdbgen:
+    if (enable_monitor_mode())
+        cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 }
 
 static int __cpuinit dbg_reset_notify(struct notifier_block *self,
@@ -992,8 +1017,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 
 static int __init arch_hw_breakpoint_init(void)
 {
-    u32 dscr;
-
     debug_arch = get_debug_arch();
 
     if (!debug_arch_supported()) {
@@ -1028,17 +1051,10 @@ static int __init arch_hw_breakpoint_init(void)
         core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
         "", core_num_wrps);
 
-    ARM_DBG_READ(c1, 0, dscr);
-    if (dscr & ARM_DSCR_HDBGEN) {
-        max_watchpoint_len = 4;
-        pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n",
-                   max_watchpoint_len);
-    } else {
-        /* Work out the maximum supported watchpoint length. */
-        max_watchpoint_len = get_max_wp_len();
-        pr_info("maximum watchpoint size is %u bytes.\n",
-                max_watchpoint_len);
-    }
+    /* Work out the maximum supported watchpoint length. */
+    max_watchpoint_len = get_max_wp_len();
+    pr_info("maximum watchpoint size is %u bytes.\n",
+            max_watchpoint_len);
 
     /* Register debug fault handler. */
     hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 53c0304b734..f9e8657dd24 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event,
86 return -ENOENT; 86 return -ENOENT;
87} 87}
88 88
89int 89int armpmu_event_set_period(struct perf_event *event)
90armpmu_event_set_period(struct perf_event *event,
91 struct hw_perf_event *hwc,
92 int idx)
93{ 90{
94 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 91 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
92 struct hw_perf_event *hwc = &event->hw;
95 s64 left = local64_read(&hwc->period_left); 93 s64 left = local64_read(&hwc->period_left);
96 s64 period = hwc->sample_period; 94 s64 period = hwc->sample_period;
97 int ret = 0; 95 int ret = 0;
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event,
119 117
120 local64_set(&hwc->prev_count, (u64)-left); 118 local64_set(&hwc->prev_count, (u64)-left);
121 119
122 armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); 120 armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
123 121
124 perf_event_update_userpage(event); 122 perf_event_update_userpage(event);
125 123
126 return ret; 124 return ret;
127} 125}
128 126
129u64 127u64 armpmu_event_update(struct perf_event *event)
130armpmu_event_update(struct perf_event *event,
131 struct hw_perf_event *hwc,
132 int idx)
133{ 128{
134 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 129 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
130 struct hw_perf_event *hwc = &event->hw;
135 u64 delta, prev_raw_count, new_raw_count; 131 u64 delta, prev_raw_count, new_raw_count;
136 132
137again: 133again:
138 prev_raw_count = local64_read(&hwc->prev_count); 134 prev_raw_count = local64_read(&hwc->prev_count);
139 new_raw_count = armpmu->read_counter(idx); 135 new_raw_count = armpmu->read_counter(event);
140 136
141 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, 137 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
142 new_raw_count) != prev_raw_count) 138 new_raw_count) != prev_raw_count)
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event)
159 if (hwc->idx < 0) 155 if (hwc->idx < 0)
160 return; 156 return;
161 157
162 armpmu_event_update(event, hwc, hwc->idx); 158 armpmu_event_update(event);
163} 159}
164 160
165static void 161static void
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags)
173 * PERF_EF_UPDATE, see comments in armpmu_start(). 169 * PERF_EF_UPDATE, see comments in armpmu_start().
174 */ 170 */
175 if (!(hwc->state & PERF_HES_STOPPED)) { 171 if (!(hwc->state & PERF_HES_STOPPED)) {
176 armpmu->disable(hwc, hwc->idx); 172 armpmu->disable(event);
177 armpmu_event_update(event, hwc, hwc->idx); 173 armpmu_event_update(event);
178 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; 174 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
179 } 175 }
180} 176}
181 177
182static void 178static void armpmu_start(struct perf_event *event, int flags)
183armpmu_start(struct perf_event *event, int flags)
184{ 179{
185 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 180 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
186 struct hw_perf_event *hwc = &event->hw; 181 struct hw_perf_event *hwc = &event->hw;
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags)
200 * get an interrupt too soon or *way* too late if the overflow has 195 * get an interrupt too soon or *way* too late if the overflow has
201 * happened since disabling. 196 * happened since disabling.
202 */ 197 */
203 armpmu_event_set_period(event, hwc, hwc->idx); 198 armpmu_event_set_period(event);
204 armpmu->enable(hwc, hwc->idx); 199 armpmu->enable(event);
205} 200}
206 201
207static void 202static void
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags)
233 perf_pmu_disable(event->pmu); 228 perf_pmu_disable(event->pmu);
234 229
235 /* If we don't have a space for the counter then finish early. */ 230 /* If we don't have a space for the counter then finish early. */
236 idx = armpmu->get_event_idx(hw_events, hwc); 231 idx = armpmu->get_event_idx(hw_events, event);
237 if (idx < 0) { 232 if (idx < 0) {
238 err = idx; 233 err = idx;
239 goto out; 234 goto out;
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags)
244 * sure it is disabled. 239 * sure it is disabled.
245 */ 240 */
246 event->hw.idx = idx; 241 event->hw.idx = idx;
247 armpmu->disable(hwc, idx); 242 armpmu->disable(event);
248 hw_events->events[idx] = event; 243 hw_events->events[idx] = event;
249 244
250 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; 245 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events,
264 struct perf_event *event) 259 struct perf_event *event)
265{ 260{
266 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 261 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
267 struct hw_perf_event fake_event = event->hw;
268 struct pmu *leader_pmu = event->group_leader->pmu; 262 struct pmu *leader_pmu = event->group_leader->pmu;
269 263
270 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) 264 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
271 return 1; 265 return 1;
272 266
273 return armpmu->get_event_idx(hw_events, &fake_event) >= 0; 267 return armpmu->get_event_idx(hw_events, event) >= 0;
274} 268}
275 269
276static int 270static int
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
316static void 310static void
317armpmu_release_hardware(struct arm_pmu *armpmu) 311armpmu_release_hardware(struct arm_pmu *armpmu)
318{ 312{
319 armpmu->free_irq(); 313 armpmu->free_irq(armpmu);
320 pm_runtime_put_sync(&armpmu->plat_device->dev); 314 pm_runtime_put_sync(&armpmu->plat_device->dev);
321} 315}
322 316
@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
330 return -ENODEV; 324 return -ENODEV;
331 325
332 pm_runtime_get_sync(&pmu_device->dev); 326 pm_runtime_get_sync(&pmu_device->dev);
333 err = armpmu->request_irq(armpmu_dispatch_irq); 327 err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
334 if (err) { 328 if (err) {
335 armpmu_release_hardware(armpmu); 329 armpmu_release_hardware(armpmu);
336 return err; 330 return err;
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu)
465 int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); 459 int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
466 460
467 if (enabled) 461 if (enabled)
468 armpmu->start(); 462 armpmu->start(armpmu);
469} 463}
470 464
471static void armpmu_disable(struct pmu *pmu) 465static void armpmu_disable(struct pmu *pmu)
472{ 466{
473 struct arm_pmu *armpmu = to_arm_pmu(pmu); 467 struct arm_pmu *armpmu = to_arm_pmu(pmu);
474 armpmu->stop(); 468 armpmu->stop(armpmu);
475} 469}
476 470
477#ifdef CONFIG_PM_RUNTIME 471#ifdef CONFIG_PM_RUNTIME
@@ -517,12 +511,13 @@ static void __init armpmu_init(struct arm_pmu *armpmu)
517 }; 511 };
518} 512}
519 513
520int armpmu_register(struct arm_pmu *armpmu, char *name, int type) 514int armpmu_register(struct arm_pmu *armpmu, int type)
521{ 515{
522 armpmu_init(armpmu); 516 armpmu_init(armpmu);
517 pm_runtime_enable(&armpmu->plat_device->dev);
523 pr_info("enabled with %s PMU driver, %d counters available\n", 518 pr_info("enabled with %s PMU driver, %d counters available\n",
524 armpmu->name, armpmu->num_events); 519 armpmu->name, armpmu->num_events);
525 return perf_pmu_register(&armpmu->pmu, name, type); 520 return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
526} 521}
527 522
528/* 523/*
@@ -576,6 +571,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
576{ 571{
577 struct frame_tail __user *tail; 572 struct frame_tail __user *tail;
578 573
574 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
575 /* We don't support guest os callchain now */
576 return;
577 }
579 578
580 tail = (struct frame_tail __user *)regs->ARM_fp - 1; 579 tail = (struct frame_tail __user *)regs->ARM_fp - 1;
581 580
@@ -603,9 +602,41 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
603{ 602{
604 struct stackframe fr; 603 struct stackframe fr;
605 604
605 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
606 /* We don't support guest os callchain now */
607 return;
608 }
609
606 fr.fp = regs->ARM_fp; 610 fr.fp = regs->ARM_fp;
607 fr.sp = regs->ARM_sp; 611 fr.sp = regs->ARM_sp;
608 fr.lr = regs->ARM_lr; 612 fr.lr = regs->ARM_lr;
609 fr.pc = regs->ARM_pc; 613 fr.pc = regs->ARM_pc;
610 walk_stackframe(&fr, callchain_trace, entry); 614 walk_stackframe(&fr, callchain_trace, entry);
611} 615}
616
617unsigned long perf_instruction_pointer(struct pt_regs *regs)
618{
619 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
620 return perf_guest_cbs->get_guest_ip();
621
622 return instruction_pointer(regs);
623}
624
625unsigned long perf_misc_flags(struct pt_regs *regs)
626{
627 int misc = 0;
628
629 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
630 if (perf_guest_cbs->is_user_mode())
631 misc |= PERF_RECORD_MISC_GUEST_USER;
632 else
633 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
634 } else {
635 if (user_mode(regs))
636 misc |= PERF_RECORD_MISC_USER;
637 else
638 misc |= PERF_RECORD_MISC_KERNEL;
639 }
640
641 return misc;
642}
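The three hooks above let perf attribute samples taken while a virtual CPU is running: callchain walking is suppressed for guests, and the instruction pointer and misc flags come from the hypervisor's callbacks instead of the interrupted host registers. A sketch of how a hypervisor is expected to wire this up; struct perf_guest_info_callbacks and the registration call exist in this kernel generation, but the helper names here are hypothetical:

	static struct perf_guest_info_callbacks guest_cbs = {
		.is_in_guest	= hyp_is_in_guest,	/* hypothetical helpers */
		.is_user_mode	= hyp_is_user_mode,
		.get_guest_ip	= hyp_get_guest_ip,
	};

	perf_register_guest_info_callbacks(&guest_cbs);
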
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d7d8d4de9d..9a4f6307a01 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -23,6 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/slab.h>
26#include <linux/spinlock.h> 27#include <linux/spinlock.h>
27 28
28#include <asm/cputype.h> 29#include <asm/cputype.h>
@@ -45,7 +46,7 @@ const char *perf_pmu_name(void)
45 if (!cpu_pmu) 46 if (!cpu_pmu)
46 return NULL; 47 return NULL;
47 48
48 return cpu_pmu->pmu.name; 49 return cpu_pmu->name;
49} 50}
50EXPORT_SYMBOL_GPL(perf_pmu_name); 51EXPORT_SYMBOL_GPL(perf_pmu_name);
51 52
@@ -70,7 +71,7 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
70 return &__get_cpu_var(cpu_hw_events); 71 return &__get_cpu_var(cpu_hw_events);
71} 72}
72 73
73static void cpu_pmu_free_irq(void) 74static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
74{ 75{
75 int i, irq, irqs; 76 int i, irq, irqs;
76 struct platform_device *pmu_device = cpu_pmu->plat_device; 77 struct platform_device *pmu_device = cpu_pmu->plat_device;
@@ -86,7 +87,7 @@ static void cpu_pmu_free_irq(void)
86 } 87 }
87} 88}
88 89
89static int cpu_pmu_request_irq(irq_handler_t handler) 90static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
90{ 91{
91 int i, err, irq, irqs; 92 int i, err, irq, irqs;
92 struct platform_device *pmu_device = cpu_pmu->plat_device; 93 struct platform_device *pmu_device = cpu_pmu->plat_device;
@@ -147,7 +148,7 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
147 148
148 /* Ensure the PMU has sane values out of reset. */ 149 /* Ensure the PMU has sane values out of reset. */
149 if (cpu_pmu && cpu_pmu->reset) 150 if (cpu_pmu && cpu_pmu->reset)
150 on_each_cpu(cpu_pmu->reset, NULL, 1); 151 on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
151} 152}
152 153
153/* 154/*
@@ -163,7 +164,9 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
163 return NOTIFY_DONE; 164 return NOTIFY_DONE;
164 165
165 if (cpu_pmu && cpu_pmu->reset) 166 if (cpu_pmu && cpu_pmu->reset)
166 cpu_pmu->reset(NULL); 167 cpu_pmu->reset(cpu_pmu);
168 else
169 return NOTIFY_DONE;
167 170
168 return NOTIFY_OK; 171 return NOTIFY_OK;
169} 172}
@@ -195,13 +198,13 @@ static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
195/* 198/*
196 * CPU PMU identification and probing. 199 * CPU PMU identification and probing.
197 */ 200 */
198static struct arm_pmu *__devinit probe_current_pmu(void) 201static int __devinit probe_current_pmu(struct arm_pmu *pmu)
199{ 202{
200 struct arm_pmu *pmu = NULL;
201 int cpu = get_cpu(); 203 int cpu = get_cpu();
202 unsigned long cpuid = read_cpuid_id(); 204 unsigned long cpuid = read_cpuid_id();
203 unsigned long implementor = (cpuid & 0xFF000000) >> 24; 205 unsigned long implementor = (cpuid & 0xFF000000) >> 24;
204 unsigned long part_number = (cpuid & 0xFFF0); 206 unsigned long part_number = (cpuid & 0xFFF0);
207 int ret = -ENODEV;
205 208
206 pr_info("probing PMU on CPU %d\n", cpu); 209 pr_info("probing PMU on CPU %d\n", cpu);
207 210
@@ -211,25 +214,25 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
211 case 0xB360: /* ARM1136 */ 214 case 0xB360: /* ARM1136 */
212 case 0xB560: /* ARM1156 */ 215 case 0xB560: /* ARM1156 */
213 case 0xB760: /* ARM1176 */ 216 case 0xB760: /* ARM1176 */
214 pmu = armv6pmu_init(); 217 ret = armv6pmu_init(pmu);
215 break; 218 break;
216 case 0xB020: /* ARM11mpcore */ 219 case 0xB020: /* ARM11mpcore */
217 pmu = armv6mpcore_pmu_init(); 220 ret = armv6mpcore_pmu_init(pmu);
218 break; 221 break;
219 case 0xC080: /* Cortex-A8 */ 222 case 0xC080: /* Cortex-A8 */
220 pmu = armv7_a8_pmu_init(); 223 ret = armv7_a8_pmu_init(pmu);
221 break; 224 break;
222 case 0xC090: /* Cortex-A9 */ 225 case 0xC090: /* Cortex-A9 */
223 pmu = armv7_a9_pmu_init(); 226 ret = armv7_a9_pmu_init(pmu);
224 break; 227 break;
225 case 0xC050: /* Cortex-A5 */ 228 case 0xC050: /* Cortex-A5 */
226 pmu = armv7_a5_pmu_init(); 229 ret = armv7_a5_pmu_init(pmu);
227 break; 230 break;
228 case 0xC0F0: /* Cortex-A15 */ 231 case 0xC0F0: /* Cortex-A15 */
229 pmu = armv7_a15_pmu_init(); 232 ret = armv7_a15_pmu_init(pmu);
230 break; 233 break;
231 case 0xC070: /* Cortex-A7 */ 234 case 0xC070: /* Cortex-A7 */
232 pmu = armv7_a7_pmu_init(); 235 ret = armv7_a7_pmu_init(pmu);
233 break; 236 break;
234 } 237 }
235 /* Intel CPUs [xscale]. */ 238 /* Intel CPUs [xscale]. */
@@ -237,43 +240,54 @@ static struct arm_pmu *__devinit probe_current_pmu(void)
237 part_number = (cpuid >> 13) & 0x7; 240 part_number = (cpuid >> 13) & 0x7;
238 switch (part_number) { 241 switch (part_number) {
239 case 1: 242 case 1:
240 pmu = xscale1pmu_init(); 243 ret = xscale1pmu_init(pmu);
241 break; 244 break;
242 case 2: 245 case 2:
243 pmu = xscale2pmu_init(); 246 ret = xscale2pmu_init(pmu);
244 break; 247 break;
245 } 248 }
246 } 249 }
247 250
248 put_cpu(); 251 put_cpu();
249 return pmu; 252 return ret;
250} 253}
251 254
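For reference, the MIDR decode driving the switch above (restating the diff, not new logic): the implementer field lives in bits [31:24] and the part number in bits [15:4]:

	unsigned long cpuid = read_cpuid_id();			/* MIDR */
	unsigned long implementor = (cpuid >> 24) & 0xff;	/* 0x41 = ARM, 0x69 = Intel */
	unsigned long part_number = cpuid & 0xfff0;		/* e.g. 0xc0f0 = Cortex-A15 */
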
252static int __devinit cpu_pmu_device_probe(struct platform_device *pdev) 255static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
253{ 256{
254 const struct of_device_id *of_id; 257 const struct of_device_id *of_id;
255 struct arm_pmu *(*init_fn)(void); 258 int (*init_fn)(struct arm_pmu *);
256 struct device_node *node = pdev->dev.of_node; 259 struct device_node *node = pdev->dev.of_node;
260 struct arm_pmu *pmu;
261 int ret = -ENODEV;
257 262
258 if (cpu_pmu) { 263 if (cpu_pmu) {
259 pr_info("attempt to register multiple PMU devices!"); 264 pr_info("attempt to register multiple PMU devices!");
260 return -ENOSPC; 265 return -ENOSPC;
261 } 266 }
262 267
268 pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
269 if (!pmu) {
270 pr_info("failed to allocate PMU device!");
271 return -ENOMEM;
272 }
273
263 if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) { 274 if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
264 init_fn = of_id->data; 275 init_fn = of_id->data;
265 cpu_pmu = init_fn(); 276 ret = init_fn(pmu);
266 } else { 277 } else {
267 cpu_pmu = probe_current_pmu(); 278 ret = probe_current_pmu(pmu);
268 } 279 }
269 280
270 if (!cpu_pmu) 281 if (ret) {
271 return -ENODEV; 282 pr_info("failed to register PMU devices!");
283 kfree(pmu);
284 return ret;
285 }
272 286
287 cpu_pmu = pmu;
273 cpu_pmu->plat_device = pdev; 288 cpu_pmu->plat_device = pdev;
274 cpu_pmu_init(cpu_pmu); 289 cpu_pmu_init(cpu_pmu);
275 register_cpu_notifier(&cpu_pmu_hotplug_notifier); 290 armpmu_register(cpu_pmu, PERF_TYPE_RAW);
276 armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
277 291
278 return 0; 292 return 0;
279} 293}
@@ -290,6 +304,16 @@ static struct platform_driver cpu_pmu_driver = {
290 304
291static int __init register_pmu_driver(void) 305static int __init register_pmu_driver(void)
292{ 306{
293 return platform_driver_register(&cpu_pmu_driver); 307 int err;
308
309 err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
310 if (err)
311 return err;
312
313 err = platform_driver_register(&cpu_pmu_driver);
314 if (err)
315 unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
316
317 return err;
294} 318}
295device_initcall(register_pmu_driver); 319device_initcall(register_pmu_driver);
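cpu_pmu_device_probe() dispatches through of_id->data, so the OF match table earlier in this file must now carry init functions rather than pointers to static PMU structs. Assumed shape (compatible strings illustrative):

	static struct of_device_id cpu_pmu_of_device_ids[] = {
		{ .compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init },
		{ .compatible = "arm,cortex-a9-pmu",  .data = armv7_a9_pmu_init },
		/* ... */
		{},
	};
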
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6ccc0797174..f3e22ff8b6a 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
401 return ret; 401 return ret;
402} 402}
403 403
404static inline u32 404static inline u32 armv6pmu_read_counter(struct perf_event *event)
405armv6pmu_read_counter(int counter)
406{ 405{
406 struct hw_perf_event *hwc = &event->hw;
407 int counter = hwc->idx;
407 unsigned long value = 0; 408 unsigned long value = 0;
408 409
409 if (ARMV6_CYCLE_COUNTER == counter) 410 if (ARMV6_CYCLE_COUNTER == counter)
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter)
418 return value; 419 return value;
419} 420}
420 421
421static inline void 422static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
422armv6pmu_write_counter(int counter,
423 u32 value)
424{ 423{
424 struct hw_perf_event *hwc = &event->hw;
425 int counter = hwc->idx;
426
425 if (ARMV6_CYCLE_COUNTER == counter) 427 if (ARMV6_CYCLE_COUNTER == counter)
426 asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); 428 asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
427 else if (ARMV6_COUNTER0 == counter) 429 else if (ARMV6_COUNTER0 == counter)
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter,
432 WARN_ONCE(1, "invalid counter number (%d)\n", counter); 434 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
433} 435}
434 436
435static void 437static void armv6pmu_enable_event(struct perf_event *event)
436armv6pmu_enable_event(struct hw_perf_event *hwc,
437 int idx)
438{ 438{
439 unsigned long val, mask, evt, flags; 439 unsigned long val, mask, evt, flags;
440 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
441 struct hw_perf_event *hwc = &event->hw;
440 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 442 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
443 int idx = hwc->idx;
441 444
442 if (ARMV6_CYCLE_COUNTER == idx) { 445 if (ARMV6_CYCLE_COUNTER == idx) {
443 mask = 0; 446 mask = 0;
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num,
473{ 476{
474 unsigned long pmcr = armv6_pmcr_read(); 477 unsigned long pmcr = armv6_pmcr_read();
475 struct perf_sample_data data; 478 struct perf_sample_data data;
476 struct pmu_hw_events *cpuc; 479 struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
480 struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
477 struct pt_regs *regs; 481 struct pt_regs *regs;
478 int idx; 482 int idx;
479 483
@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num,
489 */ 493 */
490 armv6_pmcr_write(pmcr); 494 armv6_pmcr_write(pmcr);
491 495
492 cpuc = &__get_cpu_var(cpu_hw_events);
493 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 496 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
494 struct perf_event *event = cpuc->events[idx]; 497 struct perf_event *event = cpuc->events[idx];
495 struct hw_perf_event *hwc; 498 struct hw_perf_event *hwc;
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
506 continue; 509 continue;
507 510
508 hwc = &event->hw; 511 hwc = &event->hw;
509 armpmu_event_update(event, hwc, idx); 512 armpmu_event_update(event);
510 perf_sample_data_init(&data, 0, hwc->last_period); 513 perf_sample_data_init(&data, 0, hwc->last_period);
511 if (!armpmu_event_set_period(event, hwc, idx)) 514 if (!armpmu_event_set_period(event))
512 continue; 515 continue;
513 516
514 if (perf_event_overflow(event, &data, regs)) 517 if (perf_event_overflow(event, &data, regs))
515 cpu_pmu->disable(hwc, idx); 518 cpu_pmu->disable(event);
516 } 519 }
517 520
518 /* 521 /*
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num,
527 return IRQ_HANDLED; 530 return IRQ_HANDLED;
528} 531}
529 532
530static void 533static void armv6pmu_start(struct arm_pmu *cpu_pmu)
531armv6pmu_start(void)
532{ 534{
533 unsigned long flags, val; 535 unsigned long flags, val;
534 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 536 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -540,8 +542,7 @@ armv6pmu_start(void)
540 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 542 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
541} 543}
542 544
543static void 545static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
544armv6pmu_stop(void)
545{ 546{
546 unsigned long flags, val; 547 unsigned long flags, val;
547 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 548 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -555,10 +556,11 @@ armv6pmu_stop(void)
555 556
556static int 557static int
557armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, 558armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
558 struct hw_perf_event *event) 559 struct perf_event *event)
559{ 560{
561 struct hw_perf_event *hwc = &event->hw;
560 /* Always place a cycle counter into the cycle counter. */ 562 /* Always place a cycle counter into the cycle counter. */
561 if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { 563 if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
562 if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) 564 if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
563 return -EAGAIN; 565 return -EAGAIN;
564 566
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
579 } 581 }
580} 582}
581 583
582static void 584static void armv6pmu_disable_event(struct perf_event *event)
583armv6pmu_disable_event(struct hw_perf_event *hwc,
584 int idx)
585{ 585{
586 unsigned long val, mask, evt, flags; 586 unsigned long val, mask, evt, flags;
587 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
588 struct hw_perf_event *hwc = &event->hw;
587 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 589 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
590 int idx = hwc->idx;
588 591
589 if (ARMV6_CYCLE_COUNTER == idx) { 592 if (ARMV6_CYCLE_COUNTER == idx) {
590 mask = ARMV6_PMCR_CCOUNT_IEN; 593 mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
613 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 616 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
614} 617}
615 618
616static void 619static void armv6mpcore_pmu_disable_event(struct perf_event *event)
617armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
618 int idx)
619{ 620{
620 unsigned long val, mask, flags, evt = 0; 621 unsigned long val, mask, flags, evt = 0;
622 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
623 struct hw_perf_event *hwc = &event->hw;
621 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 624 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
625 int idx = hwc->idx;
622 626
623 if (ARMV6_CYCLE_COUNTER == idx) { 627 if (ARMV6_CYCLE_COUNTER == idx) {
624 mask = ARMV6_PMCR_CCOUNT_IEN; 628 mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -649,24 +653,22 @@ static int armv6_map_event(struct perf_event *event)
649 &armv6_perf_cache_map, 0xFF); 653 &armv6_perf_cache_map, 0xFF);
650} 654}
651 655
652static struct arm_pmu armv6pmu = { 656static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu)
653 .name = "v6",
654 .handle_irq = armv6pmu_handle_irq,
655 .enable = armv6pmu_enable_event,
656 .disable = armv6pmu_disable_event,
657 .read_counter = armv6pmu_read_counter,
658 .write_counter = armv6pmu_write_counter,
659 .get_event_idx = armv6pmu_get_event_idx,
660 .start = armv6pmu_start,
661 .stop = armv6pmu_stop,
662 .map_event = armv6_map_event,
663 .num_events = 3,
664 .max_period = (1LLU << 32) - 1,
665};
666
667static struct arm_pmu *__devinit armv6pmu_init(void)
668{ 657{
669 return &armv6pmu; 658 cpu_pmu->name = "v6";
659 cpu_pmu->handle_irq = armv6pmu_handle_irq;
660 cpu_pmu->enable = armv6pmu_enable_event;
661 cpu_pmu->disable = armv6pmu_disable_event;
662 cpu_pmu->read_counter = armv6pmu_read_counter;
663 cpu_pmu->write_counter = armv6pmu_write_counter;
664 cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
665 cpu_pmu->start = armv6pmu_start;
666 cpu_pmu->stop = armv6pmu_stop;
667 cpu_pmu->map_event = armv6_map_event;
668 cpu_pmu->num_events = 3;
669 cpu_pmu->max_period = (1LLU << 32) - 1;
670
671 return 0;
670} 672}
671 673
672/* 674/*
@@ -683,33 +685,31 @@ static int armv6mpcore_map_event(struct perf_event *event)
683 &armv6mpcore_perf_cache_map, 0xFF); 685 &armv6mpcore_perf_cache_map, 0xFF);
684} 686}
685 687
686static struct arm_pmu armv6mpcore_pmu = { 688static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
687 .name = "v6mpcore",
688 .handle_irq = armv6pmu_handle_irq,
689 .enable = armv6pmu_enable_event,
690 .disable = armv6mpcore_pmu_disable_event,
691 .read_counter = armv6pmu_read_counter,
692 .write_counter = armv6pmu_write_counter,
693 .get_event_idx = armv6pmu_get_event_idx,
694 .start = armv6pmu_start,
695 .stop = armv6pmu_stop,
696 .map_event = armv6mpcore_map_event,
697 .num_events = 3,
698 .max_period = (1LLU << 32) - 1,
699};
700
701static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
702{ 689{
703 return &armv6mpcore_pmu; 690 cpu_pmu->name = "v6mpcore";
691 cpu_pmu->handle_irq = armv6pmu_handle_irq;
692 cpu_pmu->enable = armv6pmu_enable_event;
693 cpu_pmu->disable = armv6mpcore_pmu_disable_event;
694 cpu_pmu->read_counter = armv6pmu_read_counter;
695 cpu_pmu->write_counter = armv6pmu_write_counter;
696 cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
697 cpu_pmu->start = armv6pmu_start;
698 cpu_pmu->stop = armv6pmu_stop;
699 cpu_pmu->map_event = armv6mpcore_map_event;
700 cpu_pmu->num_events = 3;
701 cpu_pmu->max_period = (1LLU << 32) - 1;
702
703 return 0;
704} 704}
705#else 705#else
706static struct arm_pmu *__devinit armv6pmu_init(void) 706static int armv6pmu_init(struct arm_pmu *cpu_pmu)
707{ 707{
708 return NULL; 708 return -ENODEV;
709} 709}
710 710
711static struct arm_pmu *__devinit armv6mpcore_pmu_init(void) 711static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
712{ 712{
713 return NULL; 713 return -ENODEV;
714} 714}
715#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */ 715#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
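The v6 conversion shows the pattern repeated for v7 and XScale below: instead of returning a pointer to a shared static struct arm_pmu, each init function fills in a caller-allocated instance and returns 0 or -ENODEV. That is what lets the probe path kzalloc() one arm_pmu per PMU, a prerequisite for heterogeneous (big.LITTLE) systems. Caller-side sketch:

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;
	ret = armv6pmu_init(pmu);	/* fills name, callbacks, num_events, ... */
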
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index bd4b090ebcf..7d0cce85d17 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -18,8 +18,6 @@
18 18
19#ifdef CONFIG_CPU_V7 19#ifdef CONFIG_CPU_V7
20 20
21static struct arm_pmu armv7pmu;
22
23/* 21/*
24 * Common ARMv7 event types 22 * Common ARMv7 event types
25 * 23 *
@@ -738,7 +736,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
738 */ 736 */
739#define ARMV7_IDX_CYCLE_COUNTER 0 737#define ARMV7_IDX_CYCLE_COUNTER 0
740#define ARMV7_IDX_COUNTER0 1 738#define ARMV7_IDX_COUNTER0 1
741#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) 739#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
740 (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
742 741
743#define ARMV7_MAX_COUNTERS 32 742#define ARMV7_MAX_COUNTERS 32
744#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) 743#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
@@ -804,49 +803,34 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc)
804 return pmnc & ARMV7_OVERFLOWED_MASK; 803 return pmnc & ARMV7_OVERFLOWED_MASK;
805} 804}
806 805
807static inline int armv7_pmnc_counter_valid(int idx) 806static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
808{ 807{
809 return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST; 808 return idx >= ARMV7_IDX_CYCLE_COUNTER &&
809 idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
810} 810}
811 811
812static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) 812static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
813{ 813{
814 int ret = 0; 814 return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
815 u32 counter;
816
817 if (!armv7_pmnc_counter_valid(idx)) {
818 pr_err("CPU%u checking wrong counter %d overflow status\n",
819 smp_processor_id(), idx);
820 } else {
821 counter = ARMV7_IDX_TO_COUNTER(idx);
822 ret = pmnc & BIT(counter);
823 }
824
825 return ret;
826} 815}
827 816
828static inline int armv7_pmnc_select_counter(int idx) 817static inline int armv7_pmnc_select_counter(int idx)
829{ 818{
830 u32 counter; 819 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
831
832 if (!armv7_pmnc_counter_valid(idx)) {
833 pr_err("CPU%u selecting wrong PMNC counter %d\n",
834 smp_processor_id(), idx);
835 return -EINVAL;
836 }
837
838 counter = ARMV7_IDX_TO_COUNTER(idx);
839 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); 820 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
840 isb(); 821 isb();
841 822
842 return idx; 823 return idx;
843} 824}
844 825
845static inline u32 armv7pmu_read_counter(int idx) 826static inline u32 armv7pmu_read_counter(struct perf_event *event)
846{ 827{
828 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
829 struct hw_perf_event *hwc = &event->hw;
830 int idx = hwc->idx;
847 u32 value = 0; 831 u32 value = 0;
848 832
849 if (!armv7_pmnc_counter_valid(idx)) 833 if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
850 pr_err("CPU%u reading wrong counter %d\n", 834 pr_err("CPU%u reading wrong counter %d\n",
851 smp_processor_id(), idx); 835 smp_processor_id(), idx);
852 else if (idx == ARMV7_IDX_CYCLE_COUNTER) 836 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
@@ -857,9 +841,13 @@ static inline u32 armv7pmu_read_counter(int idx)
857 return value; 841 return value;
858} 842}
859 843
860static inline void armv7pmu_write_counter(int idx, u32 value) 844static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
861{ 845{
862 if (!armv7_pmnc_counter_valid(idx)) 846 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
847 struct hw_perf_event *hwc = &event->hw;
848 int idx = hwc->idx;
849
850 if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
863 pr_err("CPU%u writing wrong counter %d\n", 851 pr_err("CPU%u writing wrong counter %d\n",
864 smp_processor_id(), idx); 852 smp_processor_id(), idx);
865 else if (idx == ARMV7_IDX_CYCLE_COUNTER) 853 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
@@ -878,60 +866,28 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
878 866
879static inline int armv7_pmnc_enable_counter(int idx) 867static inline int armv7_pmnc_enable_counter(int idx)
880{ 868{
881 u32 counter; 869 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
882
883 if (!armv7_pmnc_counter_valid(idx)) {
884 pr_err("CPU%u enabling wrong PMNC counter %d\n",
885 smp_processor_id(), idx);
886 return -EINVAL;
887 }
888
889 counter = ARMV7_IDX_TO_COUNTER(idx);
890 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); 870 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
891 return idx; 871 return idx;
892} 872}
893 873
894static inline int armv7_pmnc_disable_counter(int idx) 874static inline int armv7_pmnc_disable_counter(int idx)
895{ 875{
896 u32 counter; 876 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
897
898 if (!armv7_pmnc_counter_valid(idx)) {
899 pr_err("CPU%u disabling wrong PMNC counter %d\n",
900 smp_processor_id(), idx);
901 return -EINVAL;
902 }
903
904 counter = ARMV7_IDX_TO_COUNTER(idx);
905 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); 877 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
906 return idx; 878 return idx;
907} 879}
908 880
909static inline int armv7_pmnc_enable_intens(int idx) 881static inline int armv7_pmnc_enable_intens(int idx)
910{ 882{
911 u32 counter; 883 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
912
913 if (!armv7_pmnc_counter_valid(idx)) {
914 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
915 smp_processor_id(), idx);
916 return -EINVAL;
917 }
918
919 counter = ARMV7_IDX_TO_COUNTER(idx);
920 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); 884 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
921 return idx; 885 return idx;
922} 886}
923 887
924static inline int armv7_pmnc_disable_intens(int idx) 888static inline int armv7_pmnc_disable_intens(int idx)
925{ 889{
926 u32 counter; 890 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
927
928 if (!armv7_pmnc_counter_valid(idx)) {
929 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
930 smp_processor_id(), idx);
931 return -EINVAL;
932 }
933
934 counter = ARMV7_IDX_TO_COUNTER(idx);
935 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); 891 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
936 isb(); 892 isb();
937 /* Clear the overflow flag in case an interrupt is pending. */ 893 /* Clear the overflow flag in case an interrupt is pending. */
@@ -956,7 +912,7 @@ static inline u32 armv7_pmnc_getreset_flags(void)
956} 912}
957 913
958#ifdef DEBUG 914#ifdef DEBUG
959static void armv7_pmnc_dump_regs(void) 915static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
960{ 916{
961 u32 val; 917 u32 val;
962 unsigned int cnt; 918 unsigned int cnt;
@@ -981,7 +937,8 @@ static void armv7_pmnc_dump_regs(void)
981 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); 937 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
982 printk(KERN_INFO "CCNT =0x%08x\n", val); 938 printk(KERN_INFO "CCNT =0x%08x\n", val);
983 939
984 for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) { 940 for (cnt = ARMV7_IDX_COUNTER0;
941 cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
985 armv7_pmnc_select_counter(cnt); 942 armv7_pmnc_select_counter(cnt);
986 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); 943 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
987 printk(KERN_INFO "CNT[%d] count =0x%08x\n", 944 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
@@ -993,10 +950,19 @@ static void armv7_pmnc_dump_regs(void)
993} 950}
994#endif 951#endif
995 952
996static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) 953static void armv7pmu_enable_event(struct perf_event *event)
997{ 954{
998 unsigned long flags; 955 unsigned long flags;
956 struct hw_perf_event *hwc = &event->hw;
957 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
999 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 958 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
959 int idx = hwc->idx;
960
961 if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
962 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
963 smp_processor_id(), idx);
964 return;
965 }
1000 966
1001 /* 967 /*
1002 * Enable counter and interrupt, and set the counter to count 968 * Enable counter and interrupt, and set the counter to count
@@ -1014,7 +980,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1014 * We only need to set the event for the cycle counter if we 980 * We only need to set the event for the cycle counter if we
1015 * have the ability to perform event filtering. 981 * have the ability to perform event filtering.
1016 */ 982 */
1017 if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) 983 if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1018 armv7_pmnc_write_evtsel(idx, hwc->config_base); 984 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1019 985
1020 /* 986 /*
@@ -1030,10 +996,19 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1030 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 996 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1031} 997}
1032 998
1033static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) 999static void armv7pmu_disable_event(struct perf_event *event)
1034{ 1000{
1035 unsigned long flags; 1001 unsigned long flags;
1002 struct hw_perf_event *hwc = &event->hw;
1003 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1036 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 1004 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1005 int idx = hwc->idx;
1006
1007 if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1008 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
1009 smp_processor_id(), idx);
1010 return;
1011 }
1037 1012
1038 /* 1013 /*
1039 * Disable counter and interrupt 1014 * Disable counter and interrupt
@@ -1057,7 +1032,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1057{ 1032{
1058 u32 pmnc; 1033 u32 pmnc;
1059 struct perf_sample_data data; 1034 struct perf_sample_data data;
1060 struct pmu_hw_events *cpuc; 1035 struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
1036 struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
1061 struct pt_regs *regs; 1037 struct pt_regs *regs;
1062 int idx; 1038 int idx;
1063 1039
@@ -1077,7 +1053,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1077 */ 1053 */
1078 regs = get_irq_regs(); 1054 regs = get_irq_regs();
1079 1055
1080 cpuc = &__get_cpu_var(cpu_hw_events);
1081 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 1056 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1082 struct perf_event *event = cpuc->events[idx]; 1057 struct perf_event *event = cpuc->events[idx];
1083 struct hw_perf_event *hwc; 1058 struct hw_perf_event *hwc;
@@ -1094,13 +1069,13 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1094 continue; 1069 continue;
1095 1070
1096 hwc = &event->hw; 1071 hwc = &event->hw;
1097 armpmu_event_update(event, hwc, idx); 1072 armpmu_event_update(event);
1098 perf_sample_data_init(&data, 0, hwc->last_period); 1073 perf_sample_data_init(&data, 0, hwc->last_period);
1099 if (!armpmu_event_set_period(event, hwc, idx)) 1074 if (!armpmu_event_set_period(event))
1100 continue; 1075 continue;
1101 1076
1102 if (perf_event_overflow(event, &data, regs)) 1077 if (perf_event_overflow(event, &data, regs))
1103 cpu_pmu->disable(hwc, idx); 1078 cpu_pmu->disable(event);
1104 } 1079 }
1105 1080
1106 /* 1081 /*
@@ -1115,7 +1090,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1115 return IRQ_HANDLED; 1090 return IRQ_HANDLED;
1116} 1091}
1117 1092
1118static void armv7pmu_start(void) 1093static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1119{ 1094{
1120 unsigned long flags; 1095 unsigned long flags;
1121 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 1096 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -1126,7 +1101,7 @@ static void armv7pmu_start(void)
1126 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 1101 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1127} 1102}
1128 1103
1129static void armv7pmu_stop(void) 1104static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1130{ 1105{
1131 unsigned long flags; 1106 unsigned long flags;
1132 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 1107 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -1138,10 +1113,12 @@ static void armv7pmu_stop(void)
1138} 1113}
1139 1114
1140static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, 1115static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1141 struct hw_perf_event *event) 1116 struct perf_event *event)
1142{ 1117{
1143 int idx; 1118 int idx;
1144 unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT; 1119 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1120 struct hw_perf_event *hwc = &event->hw;
1121 unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1145 1122
1146 /* Always place a cycle counter into the cycle counter. */ 1123 /* Always place a cycle counter into the cycle counter. */
1147 if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { 1124 if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
@@ -1192,11 +1169,14 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1192 1169
1193static void armv7pmu_reset(void *info) 1170static void armv7pmu_reset(void *info)
1194{ 1171{
1172 struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1195 u32 idx, nb_cnt = cpu_pmu->num_events; 1173 u32 idx, nb_cnt = cpu_pmu->num_events;
1196 1174
1197 /* The counter and interrupt enable registers are unknown at reset. */ 1175 /* The counter and interrupt enable registers are unknown at reset. */
1198 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) 1176 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1199 armv7pmu_disable_event(NULL, idx); 1177 armv7_pmnc_disable_counter(idx);
1178 armv7_pmnc_disable_intens(idx);
1179 }
1200 1180
1201 /* Initialize & Reset PMNC: C and P bits */ 1181 /* Initialize & Reset PMNC: C and P bits */
1202 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); 1182 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
@@ -1232,17 +1212,18 @@ static int armv7_a7_map_event(struct perf_event *event)
1232 &armv7_a7_perf_cache_map, 0xFF); 1212 &armv7_a7_perf_cache_map, 0xFF);
1233} 1213}
1234 1214
1235static struct arm_pmu armv7pmu = { 1215static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1236 .handle_irq = armv7pmu_handle_irq, 1216{
1237 .enable = armv7pmu_enable_event, 1217 cpu_pmu->handle_irq = armv7pmu_handle_irq;
1238 .disable = armv7pmu_disable_event, 1218 cpu_pmu->enable = armv7pmu_enable_event;
1239 .read_counter = armv7pmu_read_counter, 1219 cpu_pmu->disable = armv7pmu_disable_event;
1240 .write_counter = armv7pmu_write_counter, 1220 cpu_pmu->read_counter = armv7pmu_read_counter;
1241 .get_event_idx = armv7pmu_get_event_idx, 1221 cpu_pmu->write_counter = armv7pmu_write_counter;
1242 .start = armv7pmu_start, 1222 cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
1243 .stop = armv7pmu_stop, 1223 cpu_pmu->start = armv7pmu_start;
1244 .reset = armv7pmu_reset, 1224 cpu_pmu->stop = armv7pmu_stop;
1245 .max_period = (1LLU << 32) - 1, 1225 cpu_pmu->reset = armv7pmu_reset;
1226 cpu_pmu->max_period = (1LLU << 32) - 1;
1246}; 1227};
1247 1228
1248static u32 __devinit armv7_read_num_pmnc_events(void) 1229static u32 __devinit armv7_read_num_pmnc_events(void)
@@ -1256,70 +1237,75 @@ static u32 __devinit armv7_read_num_pmnc_events(void)
1256 return nb_cnt + 1; 1237 return nb_cnt + 1;
1257} 1238}
1258 1239
1259static struct arm_pmu *__devinit armv7_a8_pmu_init(void) 1240static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1260{ 1241{
1261 armv7pmu.name = "ARMv7 Cortex-A8"; 1242 armv7pmu_init(cpu_pmu);
1262 armv7pmu.map_event = armv7_a8_map_event; 1243 cpu_pmu->name = "ARMv7 Cortex-A8";
1263 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1244 cpu_pmu->map_event = armv7_a8_map_event;
1264 return &armv7pmu; 1245 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1246 return 0;
1265} 1247}
1266 1248
1267static struct arm_pmu *__devinit armv7_a9_pmu_init(void) 1249static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1268{ 1250{
1269 armv7pmu.name = "ARMv7 Cortex-A9"; 1251 armv7pmu_init(cpu_pmu);
1270 armv7pmu.map_event = armv7_a9_map_event; 1252 cpu_pmu->name = "ARMv7 Cortex-A9";
1271 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1253 cpu_pmu->map_event = armv7_a9_map_event;
1272 return &armv7pmu; 1254 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1255 return 0;
1273} 1256}
1274 1257
1275static struct arm_pmu *__devinit armv7_a5_pmu_init(void) 1258static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1276{ 1259{
1277 armv7pmu.name = "ARMv7 Cortex-A5"; 1260 armv7pmu_init(cpu_pmu);
1278 armv7pmu.map_event = armv7_a5_map_event; 1261 cpu_pmu->name = "ARMv7 Cortex-A5";
1279 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1262 cpu_pmu->map_event = armv7_a5_map_event;
1280 return &armv7pmu; 1263 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1264 return 0;
1281} 1265}
1282 1266
1283static struct arm_pmu *__devinit armv7_a15_pmu_init(void) 1267static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1284{ 1268{
1285 armv7pmu.name = "ARMv7 Cortex-A15"; 1269 armv7pmu_init(cpu_pmu);
1286 armv7pmu.map_event = armv7_a15_map_event; 1270 cpu_pmu->name = "ARMv7 Cortex-A15";
1287 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1271 cpu_pmu->map_event = armv7_a15_map_event;
1288 armv7pmu.set_event_filter = armv7pmu_set_event_filter; 1272 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1289 return &armv7pmu; 1273 cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1274 return 0;
1290} 1275}
1291 1276
1292static struct arm_pmu *__devinit armv7_a7_pmu_init(void) 1277static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1293{ 1278{
1294 armv7pmu.name = "ARMv7 Cortex-A7"; 1279 armv7pmu_init(cpu_pmu);
1295 armv7pmu.map_event = armv7_a7_map_event; 1280 cpu_pmu->name = "ARMv7 Cortex-A7";
1296 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1281 cpu_pmu->map_event = armv7_a7_map_event;
1297 armv7pmu.set_event_filter = armv7pmu_set_event_filter; 1282 cpu_pmu->num_events = armv7_read_num_pmnc_events();
1298 return &armv7pmu; 1283 cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1284 return 0;
1299} 1285}
1300#else 1286#else
1301static struct arm_pmu *__devinit armv7_a8_pmu_init(void) 1287static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1302{ 1288{
1303 return NULL; 1289 return -ENODEV;
1304} 1290}
1305 1291
1306static struct arm_pmu *__devinit armv7_a9_pmu_init(void) 1292static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1307{ 1293{
1308 return NULL; 1294 return -ENODEV;
1309} 1295}
1310 1296
1311static struct arm_pmu *__devinit armv7_a5_pmu_init(void) 1297static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1312{ 1298{
1313 return NULL; 1299 return -ENODEV;
1314} 1300}
1315 1301
1316static struct arm_pmu *__devinit armv7_a15_pmu_init(void) 1302static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1317{ 1303{
1318 return NULL; 1304 return -ENODEV;
1319} 1305}
1320 1306
1321static struct arm_pmu *__devinit armv7_a7_pmu_init(void) 1307static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1322{ 1308{
1323 return NULL; 1309 return -ENODEV;
1324} 1310}
1325#endif /* CONFIG_CPU_V7 */ 1311#endif /* CONFIG_CPU_V7 */
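Two details in the v7 conversion are easy to miss: armv7pmu_reset() now receives the instance through on_each_cpu()'s info argument (replacing the NULL it used to be handed), and it calls the low-level counter and interrupt-enable disables directly, since armv7pmu_disable_event() would now need a real perf_event. Sketch of the call path:

	/* cpu_pmu_init() and the hotplug notifier do: */
	on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);	/* info == cpu_pmu */
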
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 426e19f380a..0c8265e53d5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -224,7 +224,8 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
224{ 224{
225 unsigned long pmnc; 225 unsigned long pmnc;
226 struct perf_sample_data data; 226 struct perf_sample_data data;
227 struct pmu_hw_events *cpuc; 227 struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
228 struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
228 struct pt_regs *regs; 229 struct pt_regs *regs;
229 int idx; 230 int idx;
230 231
@@ -248,7 +249,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
248 249
249 regs = get_irq_regs(); 250 regs = get_irq_regs();
250 251
251 cpuc = &__get_cpu_var(cpu_hw_events);
252 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 252 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
253 struct perf_event *event = cpuc->events[idx]; 253 struct perf_event *event = cpuc->events[idx];
254 struct hw_perf_event *hwc; 254 struct hw_perf_event *hwc;
@@ -260,13 +260,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
260 continue; 260 continue;
261 261
262 hwc = &event->hw; 262 hwc = &event->hw;
263 armpmu_event_update(event, hwc, idx); 263 armpmu_event_update(event);
264 perf_sample_data_init(&data, 0, hwc->last_period); 264 perf_sample_data_init(&data, 0, hwc->last_period);
265 if (!armpmu_event_set_period(event, hwc, idx)) 265 if (!armpmu_event_set_period(event))
266 continue; 266 continue;
267 267
268 if (perf_event_overflow(event, &data, regs)) 268 if (perf_event_overflow(event, &data, regs))
269 cpu_pmu->disable(hwc, idx); 269 cpu_pmu->disable(event);
270 } 270 }
271 271
272 irq_work_run(); 272 irq_work_run();
@@ -280,11 +280,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
280 return IRQ_HANDLED; 280 return IRQ_HANDLED;
281} 281}
282 282
283static void 283static void xscale1pmu_enable_event(struct perf_event *event)
284xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
285{ 284{
286 unsigned long val, mask, evt, flags; 285 unsigned long val, mask, evt, flags;
286 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
287 struct hw_perf_event *hwc = &event->hw;
287 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 288 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
289 int idx = hwc->idx;
288 290
289 switch (idx) { 291 switch (idx) {
290 case XSCALE_CYCLE_COUNTER: 292 case XSCALE_CYCLE_COUNTER:
@@ -314,11 +316,13 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
314 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 316 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
315} 317}
316 318
317static void 319static void xscale1pmu_disable_event(struct perf_event *event)
318xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
319{ 320{
320 unsigned long val, mask, evt, flags; 321 unsigned long val, mask, evt, flags;
322 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
323 struct hw_perf_event *hwc = &event->hw;
321 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 324 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
325 int idx = hwc->idx;
322 326
323 switch (idx) { 327 switch (idx) {
324 case XSCALE_CYCLE_COUNTER: 328 case XSCALE_CYCLE_COUNTER:
@@ -348,9 +352,10 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
348 352
349static int 353static int
350xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, 354xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
351 struct hw_perf_event *event) 355 struct perf_event *event)
352{ 356{
353 if (XSCALE_PERFCTR_CCNT == event->config_base) { 357 struct hw_perf_event *hwc = &event->hw;
358 if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
354 if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) 359 if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
355 return -EAGAIN; 360 return -EAGAIN;
356 361
@@ -366,8 +371,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
366 } 371 }
367} 372}
368 373
369static void 374static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
370xscale1pmu_start(void)
371{ 375{
372 unsigned long flags, val; 376 unsigned long flags, val;
373 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 377 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -379,8 +383,7 @@ xscale1pmu_start(void)
379 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 383 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
380} 384}
381 385
382static void 386static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
383xscale1pmu_stop(void)
384{ 387{
385 unsigned long flags, val; 388 unsigned long flags, val;
386 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 389 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -392,9 +395,10 @@ xscale1pmu_stop(void)
392 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 395 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
393} 396}
394 397
395static inline u32 398static inline u32 xscale1pmu_read_counter(struct perf_event *event)
396xscale1pmu_read_counter(int counter)
397{ 399{
400 struct hw_perf_event *hwc = &event->hw;
401 int counter = hwc->idx;
398 u32 val = 0; 402 u32 val = 0;
399 403
400 switch (counter) { 404 switch (counter) {
@@ -412,9 +416,11 @@ xscale1pmu_read_counter(int counter)
412 return val; 416 return val;
413} 417}
414 418
415static inline void 419static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
416xscale1pmu_write_counter(int counter, u32 val)
417{ 420{
421 struct hw_perf_event *hwc = &event->hw;
422 int counter = hwc->idx;
423
418 switch (counter) { 424 switch (counter) {
419 case XSCALE_CYCLE_COUNTER: 425 case XSCALE_CYCLE_COUNTER:
420 asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); 426 asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
@@ -434,24 +440,22 @@ static int xscale_map_event(struct perf_event *event)
434 &xscale_perf_cache_map, 0xFF); 440 &xscale_perf_cache_map, 0xFF);
435} 441}
436 442
437static struct arm_pmu xscale1pmu = { 443static int __devinit xscale1pmu_init(struct arm_pmu *cpu_pmu)
438 .name = "xscale1",
439 .handle_irq = xscale1pmu_handle_irq,
440 .enable = xscale1pmu_enable_event,
441 .disable = xscale1pmu_disable_event,
442 .read_counter = xscale1pmu_read_counter,
443 .write_counter = xscale1pmu_write_counter,
444 .get_event_idx = xscale1pmu_get_event_idx,
445 .start = xscale1pmu_start,
446 .stop = xscale1pmu_stop,
447 .map_event = xscale_map_event,
448 .num_events = 3,
449 .max_period = (1LLU << 32) - 1,
450};
451
452static struct arm_pmu *__devinit xscale1pmu_init(void)
453{ 444{
454 return &xscale1pmu; 445 cpu_pmu->name = "xscale1";
446 cpu_pmu->handle_irq = xscale1pmu_handle_irq;
447 cpu_pmu->enable = xscale1pmu_enable_event;
448 cpu_pmu->disable = xscale1pmu_disable_event;
449 cpu_pmu->read_counter = xscale1pmu_read_counter;
450 cpu_pmu->write_counter = xscale1pmu_write_counter;
451 cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
452 cpu_pmu->start = xscale1pmu_start;
453 cpu_pmu->stop = xscale1pmu_stop;
454 cpu_pmu->map_event = xscale_map_event;
455 cpu_pmu->num_events = 3;
456 cpu_pmu->max_period = (1LLU << 32) - 1;
457
458 return 0;
455} 459}
456 460
457#define XSCALE2_OVERFLOWED_MASK 0x01f 461#define XSCALE2_OVERFLOWED_MASK 0x01f
@@ -567,7 +571,8 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
567{ 571{
568 unsigned long pmnc, of_flags; 572 unsigned long pmnc, of_flags;
569 struct perf_sample_data data; 573 struct perf_sample_data data;
570 struct pmu_hw_events *cpuc; 574 struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
575 struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
571 struct pt_regs *regs; 576 struct pt_regs *regs;
572 int idx; 577 int idx;
573 578
@@ -585,7 +590,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
585 590
586 regs = get_irq_regs(); 591 regs = get_irq_regs();
587 592
588 cpuc = &__get_cpu_var(cpu_hw_events);
589 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 593 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
590 struct perf_event *event = cpuc->events[idx]; 594 struct perf_event *event = cpuc->events[idx];
591 struct hw_perf_event *hwc; 595 struct hw_perf_event *hwc;
@@ -597,13 +601,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
597 continue; 601 continue;
598 602
599 hwc = &event->hw; 603 hwc = &event->hw;
600 armpmu_event_update(event, hwc, idx); 604 armpmu_event_update(event);
601 perf_sample_data_init(&data, 0, hwc->last_period); 605 perf_sample_data_init(&data, 0, hwc->last_period);
602 if (!armpmu_event_set_period(event, hwc, idx)) 606 if (!armpmu_event_set_period(event))
603 continue; 607 continue;
604 608
605 if (perf_event_overflow(event, &data, regs)) 609 if (perf_event_overflow(event, &data, regs))
606 cpu_pmu->disable(hwc, idx); 610 cpu_pmu->disable(event);
607 } 611 }
608 612
609 irq_work_run(); 613 irq_work_run();
@@ -617,11 +621,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
617 return IRQ_HANDLED; 621 return IRQ_HANDLED;
618} 622}
619 623
620static void 624static void xscale2pmu_enable_event(struct perf_event *event)
621xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
622{ 625{
623 unsigned long flags, ien, evtsel; 626 unsigned long flags, ien, evtsel;
627 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
628 struct hw_perf_event *hwc = &event->hw;
624 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 629 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
630 int idx = hwc->idx;
625 631
626 ien = xscale2pmu_read_int_enable(); 632 ien = xscale2pmu_read_int_enable();
627 evtsel = xscale2pmu_read_event_select(); 633 evtsel = xscale2pmu_read_event_select();
@@ -661,11 +667,13 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
661 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 667 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
662} 668}
663 669
664static void 670static void xscale2pmu_disable_event(struct perf_event *event)
665xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
666{ 671{
667 unsigned long flags, ien, evtsel, of_flags; 672 unsigned long flags, ien, evtsel, of_flags;
673 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
674 struct hw_perf_event *hwc = &event->hw;
668 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 675 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
676 int idx = hwc->idx;
669 677
670 ien = xscale2pmu_read_int_enable(); 678 ien = xscale2pmu_read_int_enable();
671 evtsel = xscale2pmu_read_event_select(); 679 evtsel = xscale2pmu_read_event_select();
@@ -713,7 +721,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
713 721
714static int 722static int
715xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, 723xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
716 struct hw_perf_event *event) 724 struct perf_event *event)
717{ 725{
718 int idx = xscale1pmu_get_event_idx(cpuc, event); 726 int idx = xscale1pmu_get_event_idx(cpuc, event);
719 if (idx >= 0) 727 if (idx >= 0)
@@ -727,8 +735,7 @@ out:
727 return idx; 735 return idx;
728} 736}
729 737
730static void 738static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
731xscale2pmu_start(void)
732{ 739{
733 unsigned long flags, val; 740 unsigned long flags, val;
734 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 741 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -740,8 +747,7 @@ xscale2pmu_start(void)
740 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 747 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
741} 748}
742 749
743static void 750static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
744xscale2pmu_stop(void)
745{ 751{
746 unsigned long flags, val; 752 unsigned long flags, val;
747 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 753 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -753,9 +759,10 @@ xscale2pmu_stop(void)
753 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 759 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
754} 760}
755 761
756static inline u32 762static inline u32 xscale2pmu_read_counter(struct perf_event *event)
757xscale2pmu_read_counter(int counter)
758{ 763{
764 struct hw_perf_event *hwc = &event->hw;
765 int counter = hwc->idx;
759 u32 val = 0; 766 u32 val = 0;
760 767
761 switch (counter) { 768 switch (counter) {
@@ -779,9 +786,11 @@ xscale2pmu_read_counter(int counter)
779 return val; 786 return val;
780} 787}
781 788
782static inline void 789static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
783xscale2pmu_write_counter(int counter, u32 val)
784{ 790{
791 struct hw_perf_event *hwc = &event->hw;
792 int counter = hwc->idx;
793
785 switch (counter) { 794 switch (counter) {
786 case XSCALE_CYCLE_COUNTER: 795 case XSCALE_CYCLE_COUNTER:
787 asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); 796 asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
@@ -801,33 +810,31 @@ xscale2pmu_write_counter(int counter, u32 val)
801 } 810 }
802} 811}
803 812
804static struct arm_pmu xscale2pmu = { 813static int __devinit xscale2pmu_init(struct arm_pmu *cpu_pmu)
805 .name = "xscale2",
806 .handle_irq = xscale2pmu_handle_irq,
807 .enable = xscale2pmu_enable_event,
808 .disable = xscale2pmu_disable_event,
809 .read_counter = xscale2pmu_read_counter,
810 .write_counter = xscale2pmu_write_counter,
811 .get_event_idx = xscale2pmu_get_event_idx,
812 .start = xscale2pmu_start,
813 .stop = xscale2pmu_stop,
814 .map_event = xscale_map_event,
815 .num_events = 5,
816 .max_period = (1LLU << 32) - 1,
817};
818
819static struct arm_pmu *__devinit xscale2pmu_init(void)
820{ 814{
821 return &xscale2pmu; 815 cpu_pmu->name = "xscale2";
816 cpu_pmu->handle_irq = xscale2pmu_handle_irq;
817 cpu_pmu->enable = xscale2pmu_enable_event;
818 cpu_pmu->disable = xscale2pmu_disable_event;
819 cpu_pmu->read_counter = xscale2pmu_read_counter;
820 cpu_pmu->write_counter = xscale2pmu_write_counter;
821 cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
822 cpu_pmu->start = xscale2pmu_start;
823 cpu_pmu->stop = xscale2pmu_stop;
824 cpu_pmu->map_event = xscale_map_event;
825 cpu_pmu->num_events = 5;
826 cpu_pmu->max_period = (1LLU << 32) - 1;
827
828 return 0;
822} 829}
823#else 830#else
824static struct arm_pmu *__devinit xscale1pmu_init(void) 831static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu)
825{ 832{
826 return NULL; 833 return -ENODEV;
827} 834}
828 835
829static struct arm_pmu *__devinit xscale2pmu_init(void) 836static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu)
830{ 837{
831 return NULL; 838 return -ENODEV;
832} 839}
833#endif /* CONFIG_CPU_XSCALE */ 840#endif /* CONFIG_CPU_XSCALE */
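All three interrupt handlers (v6, v7, XScale) now cast their dev argument back to the arm_pmu, so the request side must pass the instance as the dev_id cookie. A sketch of what cpu_pmu_request_irq() is assumed to do (flags abbreviated):

	err = request_irq(irq, handler, IRQF_NOBALANCING,
			  "arm-pmu", cpu_pmu);	/* cpu_pmu comes back as 'dev' */
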
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index da1d1aa20ad..9a89bf4aefe 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -383,6 +383,12 @@ void cpu_init(void)
383 BUG(); 383 BUG();
384 } 384 }
385 385
386 /*
387 * This only works on resume and secondary cores. For booting on the
388 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
389 */
390 set_my_cpu_offset(per_cpu_offset(cpu));
391
386 cpu_proc_init(); 392 cpu_proc_init();
387 393
388 /* 394 /*
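set_my_cpu_offset() is supplied by the new asm/percpu.h in this merge (see the diffstat). The assumed implementation stashes the per-CPU base in TPIDRPRW so that __my_cpu_offset becomes a single system-register read instead of an array lookup:

	static inline void set_my_cpu_offset(unsigned long off)
	{
		/* TPIDRPRW is reserved for kernel use on v6K/v7 */
		asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
	}
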
@@ -426,13 +432,14 @@ int __cpu_logical_map[NR_CPUS];
426void __init smp_setup_processor_id(void) 432void __init smp_setup_processor_id(void)
427{ 433{
428 int i; 434 int i;
429 u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0; 435 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
436 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
430 437
431 cpu_logical_map(0) = cpu; 438 cpu_logical_map(0) = cpu;
432 for (i = 1; i < NR_CPUS; ++i) 439 for (i = 1; i < nr_cpu_ids; ++i)
433 cpu_logical_map(i) = i == cpu ? 0 : i; 440 cpu_logical_map(i) = i == cpu ? 0 : i;
434 441
435 printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu); 442 printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
436} 443}
437 444
438static void __init setup_processor(void) 445static void __init setup_processor(void)
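MPIDR_HWID_BITMASK and MPIDR_AFFINITY_LEVEL() come from the extended asm/cputype.h, replacing the per-level masks deleted from topology.c further down. Assumed definitions, eight bits per affinity level:

	#define MPIDR_LEVEL_BITS	8
	#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_HWID_BITMASK	0xFFFFFF
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		(((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)
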
@@ -758,6 +765,7 @@ void __init setup_arch(char **cmdline_p)
758 765
759 unflatten_device_tree(); 766 unflatten_device_tree();
760 767
768 arm_dt_init_cpu_maps();
761#ifdef CONFIG_SMP 769#ifdef CONFIG_SMP
762 if (is_smp()) { 770 if (is_smp()) {
763 smp_set_ops(mdesc->smp); 771 smp_set_ops(mdesc->smp);
@@ -841,12 +849,9 @@ static const char *hwcap_str[] = {
841 849
842static int c_show(struct seq_file *m, void *v) 850static int c_show(struct seq_file *m, void *v)
843{ 851{
844 int i; 852 int i, j;
853 u32 cpuid;
845 854
846 seq_printf(m, "Processor\t: %s rev %d (%s)\n",
847 cpu_name, read_cpuid_id() & 15, elf_platform);
848
849#if defined(CONFIG_SMP)
850 for_each_online_cpu(i) { 855 for_each_online_cpu(i) {
851 /* 856 /*
852 * glibc reads /proc/cpuinfo to determine the number of 857 * glibc reads /proc/cpuinfo to determine the number of
@@ -854,45 +859,48 @@ static int c_show(struct seq_file *m, void *v)
854 * "processor". Give glibc what it expects. 859 * "processor". Give glibc what it expects.
855 */ 860 */
856 seq_printf(m, "processor\t: %d\n", i); 861 seq_printf(m, "processor\t: %d\n", i);
857 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n", 862 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
863 seq_printf(m, "model name\t: %s rev %d (%s)\n",
864 cpu_name, cpuid & 15, elf_platform);
865
866#if defined(CONFIG_SMP)
867 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
858 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), 868 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
859 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); 869 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
860 } 870#else
861#else /* CONFIG_SMP */ 871 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
862 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", 872 loops_per_jiffy / (500000/HZ),
863 loops_per_jiffy / (500000/HZ), 873 (loops_per_jiffy / (5000/HZ)) % 100);
864 (loops_per_jiffy / (5000/HZ)) % 100);
865#endif 874#endif
875 /* dump out the processor features */
876 seq_puts(m, "Features\t: ");
866 877
867 /* dump out the processor features */ 878 for (j = 0; hwcap_str[j]; j++)
868 seq_puts(m, "Features\t: "); 879 if (elf_hwcap & (1 << j))
869 880 seq_printf(m, "%s ", hwcap_str[j]);
870 for (i = 0; hwcap_str[i]; i++)
871 if (elf_hwcap & (1 << i))
872 seq_printf(m, "%s ", hwcap_str[i]);
873 881
874 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); 882 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
875 seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]); 883 seq_printf(m, "CPU architecture: %s\n",
884 proc_arch[cpu_architecture()]);
876 885
877 if ((read_cpuid_id() & 0x0008f000) == 0x00000000) { 886 if ((cpuid & 0x0008f000) == 0x00000000) {
878 /* pre-ARM7 */ 887 /* pre-ARM7 */
879 seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4); 888 seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
880 } else {
881 if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
882 /* ARM7 */
883 seq_printf(m, "CPU variant\t: 0x%02x\n",
884 (read_cpuid_id() >> 16) & 127);
885 } else { 889 } else {
886 /* post-ARM7 */ 890 if ((cpuid & 0x0008f000) == 0x00007000) {
887 seq_printf(m, "CPU variant\t: 0x%x\n", 891 /* ARM7 */
888 (read_cpuid_id() >> 20) & 15); 892 seq_printf(m, "CPU variant\t: 0x%02x\n",
893 (cpuid >> 16) & 127);
894 } else {
895 /* post-ARM7 */
896 seq_printf(m, "CPU variant\t: 0x%x\n",
897 (cpuid >> 20) & 15);
898 }
899 seq_printf(m, "CPU part\t: 0x%03x\n",
900 (cpuid >> 4) & 0xfff);
889 } 901 }
890 seq_printf(m, "CPU part\t: 0x%03x\n", 902 seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
891 (read_cpuid_id() >> 4) & 0xfff);
892 } 903 }
893 seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
894
895 seq_puts(m, "\n");
896 904
897 seq_printf(m, "Hardware\t: %s\n", machine_name); 905 seq_printf(m, "Hardware\t: %s\n", machine_name);
898 seq_printf(m, "Revision\t: %04x\n", system_rev); 906 seq_printf(m, "Revision\t: %04x\n", system_rev);
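c_show() now emits the full identification block once per online CPU, reading the MIDR snapshot cached in per_cpu(cpu_data).cpuid on SMP (so heterogeneous systems report each core's real ID rather than that of whichever CPU services the read) and falling back to a live read_cpuid_id() on UP. Hoisting the value into the cpuid local also guarantees that the architecture check and every printed field describe the same core. Below is a stand-alone sketch of the post-ARM7 field decoding, using the same masks and shifts as the hunk above; the sample value and the printing around it are hypothetical, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t midr = 0x412fc0f1;  /* sample MIDR: Cortex-A15 r2p1 */

        printf("CPU implementer : 0x%02x\n", midr >> 24);          /* 0x41 = ARM  */
        printf("CPU variant     : 0x%x\n", (midr >> 20) & 15);     /* major rev 2 */
        printf("CPU part        : 0x%03x\n", (midr >> 4) & 0xfff); /* 0xc0f       */
        printf("CPU revision    : %u\n", midr & 15);               /* minor rev 1 */
        return 0;
    }

The (cpuid & 0x0008f000) tests retained from the old code distinguish the pre-ARM7 and ARM7 ID register layouts, which place the variant field differently.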
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 57f53773197..84f4cbf652e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -281,6 +281,7 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
281 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); 281 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
282 282
283 cpu_info->loops_per_jiffy = loops_per_jiffy; 283 cpu_info->loops_per_jiffy = loops_per_jiffy;
284 cpu_info->cpuid = read_cpuid_id();
284 285
285 store_cpu_topology(cpuid); 286 store_cpu_topology(cpuid);
286} 287}
@@ -313,9 +314,10 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
313 current->active_mm = mm; 314 current->active_mm = mm;
314 cpumask_set_cpu(cpu, mm_cpumask(mm)); 315 cpumask_set_cpu(cpu, mm_cpumask(mm));
315 316
317 cpu_init();
318
316 printk("CPU%u: Booted secondary processor\n", cpu); 319 printk("CPU%u: Booted secondary processor\n", cpu);
317 320
318 cpu_init();
319 preempt_disable(); 321 preempt_disable();
320 trace_hardirqs_off(); 322 trace_hardirqs_off();
321 323
@@ -371,6 +373,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
371 373
372void __init smp_prepare_boot_cpu(void) 374void __init smp_prepare_boot_cpu(void)
373{ 375{
376 set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
374} 377}
375 378
376void __init smp_prepare_cpus(unsigned int max_cpus) 379void __init smp_prepare_cpus(unsigned int max_cpus)
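Three small changes here: smp_store_cpu_info() snapshots the MIDR into cpu_data (consumed by the /proc/cpuinfo rework above), cpu_init() moves ahead of the boot message, presumably so per-CPU state is valid before anything on the printk path touches it, and smp_prepare_boot_cpu() seeds the boot CPU's per-CPU offset. The last hunk only makes sense alongside the optimized per-CPU access this merge introduces, where __my_cpu_offset is kept in the TPIDRPRW register instead of being looked up through smp_processor_id(); nothing else would write that register for CPU0. A sketch under that assumption (register encoding per the ARMv7 ARM; the in-tree helpers may differ in detail):

    /* Hedged sketch, not the in-tree asm/percpu.h. */
    static inline void set_my_cpu_offset(unsigned long off)
    {
        /* write TPIDRPRW (PL1-only software thread ID register) */
        asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
    }

    static inline unsigned long __my_cpu_offset(void)
    {
        unsigned long off;
        /* one mrc instead of a cpu-number -> __per_cpu_offset[] lookup */
        asm volatile("mrc p15, 0, %0, c13, c0, 4" : "=r" (off));
        return off;
    }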
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 26c12c6440f..79282ebcd93 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -196,32 +196,7 @@ static inline void parse_dt_topology(void) {}
196static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} 196static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
197#endif 197#endif
198 198
199 199 /*
200/*
201 * cpu topology management
202 */
203
204#define MPIDR_SMP_BITMASK (0x3 << 30)
205#define MPIDR_SMP_VALUE (0x2 << 30)
206
207#define MPIDR_MT_BITMASK (0x1 << 24)
208
209/*
210 * These masks reflect the current use of the affinity levels.
211 * The affinity level can be up to 16 bits according to ARM ARM
212 */
213#define MPIDR_HWID_BITMASK 0xFFFFFF
214
215#define MPIDR_LEVEL0_MASK 0x3
216#define MPIDR_LEVEL0_SHIFT 0
217
218#define MPIDR_LEVEL1_MASK 0xF
219#define MPIDR_LEVEL1_SHIFT 8
220
221#define MPIDR_LEVEL2_MASK 0xFF
222#define MPIDR_LEVEL2_SHIFT 16
223
224/*
225 * cpu topology table 200 * cpu topology table
226 */ 201 */
227struct cputopo_arm cpu_topology[NR_CPUS]; 202struct cputopo_arm cpu_topology[NR_CPUS];
@@ -282,19 +257,14 @@ void store_cpu_topology(unsigned int cpuid)
282 257
283 if (mpidr & MPIDR_MT_BITMASK) { 258 if (mpidr & MPIDR_MT_BITMASK) {
284 /* core performance interdependency */ 259 /* core performance interdependency */
285 cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT) 260 cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
286 & MPIDR_LEVEL0_MASK; 261 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
287 cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT) 262 cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
288 & MPIDR_LEVEL1_MASK;
289 cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
290 & MPIDR_LEVEL2_MASK;
291 } else { 263 } else {
292 /* largely independent cores */ 264 /* largely independent cores */
293 cpuid_topo->thread_id = -1; 265 cpuid_topo->thread_id = -1;
294 cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT) 266 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
295 & MPIDR_LEVEL0_MASK; 267 cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
296 cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
297 & MPIDR_LEVEL1_MASK;
298 } 268 }
299 } else { 269 } else {
300 /* 270 /*
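The per-level shift/mask pairs are folded into a single MPIDR_AFFINITY_LEVEL() accessor. Note that the deleted masks read only 2, 4, and 8 bits at levels 0, 1, and 2, whereas the MPIDR affinity fields Aff0..Aff2 are architecturally one byte each. One plausible definition, consistent with the deleted shifts and the call sites above (the real macro lives in asm/cputype.h and may differ in detail):

    #define MPIDR_LEVEL_BITS   8
    #define MPIDR_LEVEL_MASK   ((1 << MPIDR_LEVEL_BITS) - 1)
    #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
            (((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

For example, an MPIDR of 0x80000201 yields MPIDR_AFFINITY_LEVEL(..., 0) == 0x01 and level 1 == 0x02, i.e. core 1 in cluster 2.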
diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c
index 2a791766283..031e2fbd0e1 100644
--- a/arch/arm/mach-omap2/pmu.c
+++ b/arch/arm/mach-omap2/pmu.c
@@ -57,8 +57,6 @@ static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[])
57 if (IS_ERR(omap_pmu_dev)) 57 if (IS_ERR(omap_pmu_dev))
58 return PTR_ERR(omap_pmu_dev); 58 return PTR_ERR(omap_pmu_dev);
59 59
60 pm_runtime_enable(&omap_pmu_dev->dev);
61
62 return 0; 60 return 0;
63} 61}
64 62
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 4e07eec1270..bc4a5e9ebb7 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -2,6 +2,9 @@
2 * linux/arch/arm/mm/context.c 2 * linux/arch/arm/mm/context.c
3 * 3 *
4 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. 4 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
5 * Copyright (C) 2012 ARM Limited
6 *
7 * Author: Will Deacon <will.deacon@arm.com>
5 * 8 *
6 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -14,14 +17,40 @@
14#include <linux/percpu.h> 17#include <linux/percpu.h>
15 18
16#include <asm/mmu_context.h> 19#include <asm/mmu_context.h>
20#include <asm/smp_plat.h>
17#include <asm/thread_notify.h> 21#include <asm/thread_notify.h>
18#include <asm/tlbflush.h> 22#include <asm/tlbflush.h>
19 23
24/*
25 * On ARMv6, we have the following structure in the Context ID:
26 *
27 * 31 7 0
28 * +-------------------------+-----------+
29 * | process ID | ASID |
30 * +-------------------------+-----------+
31 * | context ID |
32 * +-------------------------------------+
33 *
34 * The ASID is used to tag entries in the CPU caches and TLBs.
35 * The context ID is used by debuggers and trace logic, and
36 * should be unique within all running processes.
37 */
38#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
39#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
40
41#define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1)
42#define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK)
43
20static DEFINE_RAW_SPINLOCK(cpu_asid_lock); 44static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
21unsigned int cpu_last_asid = ASID_FIRST_VERSION; 45static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
46static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
47
48static DEFINE_PER_CPU(atomic64_t, active_asids);
49static DEFINE_PER_CPU(u64, reserved_asids);
50static cpumask_t tlb_flush_pending;
22 51
23#ifdef CONFIG_ARM_LPAE 52#ifdef CONFIG_ARM_LPAE
24void cpu_set_reserved_ttbr0(void) 53static void cpu_set_reserved_ttbr0(void)
25{ 54{
26 unsigned long ttbl = __pa(swapper_pg_dir); 55 unsigned long ttbl = __pa(swapper_pg_dir);
27 unsigned long ttbh = 0; 56 unsigned long ttbh = 0;
@@ -37,7 +66,7 @@ void cpu_set_reserved_ttbr0(void)
37 isb(); 66 isb();
38} 67}
39#else 68#else
40void cpu_set_reserved_ttbr0(void) 69static void cpu_set_reserved_ttbr0(void)
41{ 70{
42 u32 ttb; 71 u32 ttb;
43 /* Copy TTBR1 into TTBR0 */ 72 /* Copy TTBR1 into TTBR0 */
@@ -84,124 +113,104 @@ static int __init contextidr_notifier_init(void)
84arch_initcall(contextidr_notifier_init); 113arch_initcall(contextidr_notifier_init);
85#endif 114#endif
86 115
87/* 116static void flush_context(unsigned int cpu)
88 * We fork()ed a process, and we need a new context for the child
89 * to run in.
90 */
91void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
92{ 117{
93 mm->context.id = 0; 118 int i;
94 raw_spin_lock_init(&mm->context.id_lock); 119 u64 asid;
95} 120
121 /* Update the list of reserved ASIDs and the ASID bitmap. */
122 bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
123 for_each_possible_cpu(i) {
124 if (i == cpu) {
125 asid = 0;
126 } else {
127 asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
128 __set_bit(ASID_TO_IDX(asid), asid_map);
129 }
130 per_cpu(reserved_asids, i) = asid;
131 }
96 132
97static void flush_context(void) 133 /* Queue a TLB invalidate and flush the I-cache if necessary. */
98{ 134 if (!tlb_ops_need_broadcast())
99 cpu_set_reserved_ttbr0(); 135 cpumask_set_cpu(cpu, &tlb_flush_pending);
100 local_flush_tlb_all(); 136 else
101 if (icache_is_vivt_asid_tagged()) { 137 cpumask_setall(&tlb_flush_pending);
138
139 if (icache_is_vivt_asid_tagged())
102 __flush_icache_all(); 140 __flush_icache_all();
103 dsb();
104 }
105} 141}
106 142
107#ifdef CONFIG_SMP 143static int is_reserved_asid(u64 asid)
144{
145 int cpu;
146 for_each_possible_cpu(cpu)
147 if (per_cpu(reserved_asids, cpu) == asid)
148 return 1;
149 return 0;
150}
108 151
109static void set_mm_context(struct mm_struct *mm, unsigned int asid) 152static void new_context(struct mm_struct *mm, unsigned int cpu)
110{ 153{
111 unsigned long flags; 154 u64 asid = mm->context.id;
155 u64 generation = atomic64_read(&asid_generation);
112 156
113 /* 157 if (asid != 0 && is_reserved_asid(asid)) {
114 * Locking needed for multi-threaded applications where the
115 * same mm->context.id could be set from different CPUs during
116 * the broadcast. This function is also called via IPI so the
117 * mm->context.id_lock has to be IRQ-safe.
118 */
119 raw_spin_lock_irqsave(&mm->context.id_lock, flags);
120 if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
121 /* 158 /*
122 * Old version of ASID found. Set the new one and 159 * Our current ASID was active during a rollover, we can
123 * reset mm_cpumask(mm). 160 * continue to use it and this was just a false alarm.
124 */ 161 */
125 mm->context.id = asid; 162 asid = generation | (asid & ~ASID_MASK);
163 } else {
164 /*
165 * Allocate a free ASID. If we can't find one, take a
166 * note of the currently active ASIDs and mark the TLBs
167 * as requiring flushes.
168 */
169 asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
170 if (asid == NUM_USER_ASIDS) {
171 generation = atomic64_add_return(ASID_FIRST_VERSION,
172 &asid_generation);
173 flush_context(cpu);
174 asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
175 }
176 __set_bit(asid, asid_map);
177 asid = generation | IDX_TO_ASID(asid);
126 cpumask_clear(mm_cpumask(mm)); 178 cpumask_clear(mm_cpumask(mm));
127 } 179 }
128 raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
129 180
130 /* 181 mm->context.id = asid;
131 * Set the mm_cpumask(mm) bit for the current CPU.
132 */
133 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
134} 182}
135 183
136/* 184void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
137 * Reset the ASID on the current CPU. This function call is broadcast
138 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
139 */
140static void reset_context(void *info)
141{ 185{
142 unsigned int asid; 186 unsigned long flags;
143 unsigned int cpu = smp_processor_id(); 187 unsigned int cpu = smp_processor_id();
144 struct mm_struct *mm = current->active_mm;
145 188
146 smp_rmb(); 189 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
147 asid = cpu_last_asid + cpu + 1; 190 __check_vmalloc_seq(mm);
148 191
149 flush_context(); 192 /*
150 set_mm_context(mm, asid); 193 * Required during context switch to avoid speculative page table
151 194 * walking with the wrong TTBR.
152 /* set the new ASID */ 195 */
153 cpu_switch_mm(mm->pgd, mm); 196 cpu_set_reserved_ttbr0();
154}
155 197
156#else 198 if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
199 && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
200 goto switch_mm_fastpath;
157 201
158static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) 202 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
159{ 203 /* Check that our ASID belongs to the current generation. */
160 mm->context.id = asid; 204 if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
161 cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); 205 new_context(mm, cpu);
162}
163 206
164#endif 207 atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
208 cpumask_set_cpu(cpu, mm_cpumask(mm));
165 209
166void __new_context(struct mm_struct *mm) 210 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
167{ 211 local_flush_tlb_all();
168 unsigned int asid; 212 raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
169 213
170 raw_spin_lock(&cpu_asid_lock); 214switch_mm_fastpath:
171#ifdef CONFIG_SMP 215 cpu_switch_mm(mm->pgd, mm);
172 /*
173 * Check the ASID again, in case the change was broadcast from
174 * another CPU before we acquired the lock.
175 */
176 if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
177 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
178 raw_spin_unlock(&cpu_asid_lock);
179 return;
180 }
181#endif
182 /*
183 * At this point, it is guaranteed that the current mm (with
184 * an old ASID) isn't active on any other CPU since the ASIDs
185 * are changed simultaneously via IPI.
186 */
187 asid = ++cpu_last_asid;
188 if (asid == 0)
189 asid = cpu_last_asid = ASID_FIRST_VERSION;
190
191 /*
192 * If we've used up all our ASIDs, we need
193 * to start a new version and flush the TLB.
194 */
195 if (unlikely((asid & ~ASID_MASK) == 0)) {
196 asid = cpu_last_asid + smp_processor_id() + 1;
197 flush_context();
198#ifdef CONFIG_SMP
199 smp_wmb();
200 smp_call_function(reset_context, NULL, 1);
201#endif
202 cpu_last_asid += NR_CPUS;
203 }
204
205 set_mm_context(mm, asid);
206 raw_spin_unlock(&cpu_asid_lock);
207} 216}
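The rewritten allocator replaces the IPI-broadcast rollover with a generation scheme: mm->context.id becomes a 64-bit value whose low ASID_BITS hold the hardware ASID and whose upper bits hold a generation number. The fast path in check_and_switch_context() compares generations and publishes the ASID with an atomic64_xchg on active_asids, all without taking cpu_asid_lock; if a concurrent rollover has zeroed this CPU's active_asids slot in flush_context(), the xchg returns 0 and the code falls through to the locked slow path. A runnable sketch of the generation arithmetic, assuming ASID_BITS == 8 as on ARMv7 (hypothetical stand-alone code; the names mirror the diff):

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS          8
    #define ASID_FIRST_VERSION (1ULL << ASID_BITS)   /* 0x100 */

    static int needs_new_asid(uint64_t ctx, uint64_t gen)
    {
        /* Non-zero iff ctx was allocated under an older generation. */
        return !!((ctx ^ gen) >> ASID_BITS);
    }

    int main(void)
    {
        uint64_t generation = 3 * ASID_FIRST_VERSION;     /* 0x300 */
        uint64_t ctx = generation | 0x05;                 /* gen 3, ASID 5 */

        printf("%d\n", needs_new_asid(ctx, generation));  /* 0: fast path */

        /* A rollover bumps the generation by ASID_FIRST_VERSION ... */
        generation += ASID_FIRST_VERSION;                 /* 0x400 */

        /* ... so the xor now has bits above ASID_BITS set. */
        printf("%d\n", needs_new_asid(ctx, generation));  /* 1: slow path */
        return 0;
    }

Deferring the actual invalidation through tlb_flush_pending means each CPU flushes its own TLB the next time it switches context, instead of being interrupted by a synchronous smp_call_function() broadcast.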
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 5dcc2fd46c4..88fd86cf3d9 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
47} 47}
48EXPORT_SYMBOL(ioremap_page); 48EXPORT_SYMBOL(ioremap_page);
49 49
50void __check_kvm_seq(struct mm_struct *mm) 50void __check_vmalloc_seq(struct mm_struct *mm)
51{ 51{
52 unsigned int seq; 52 unsigned int seq;
53 53
54 do { 54 do {
55 seq = init_mm.context.kvm_seq; 55 seq = init_mm.context.vmalloc_seq;
56 memcpy(pgd_offset(mm, VMALLOC_START), 56 memcpy(pgd_offset(mm, VMALLOC_START),
57 pgd_offset_k(VMALLOC_START), 57 pgd_offset_k(VMALLOC_START),
58 sizeof(pgd_t) * (pgd_index(VMALLOC_END) - 58 sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
59 pgd_index(VMALLOC_START))); 59 pgd_index(VMALLOC_START)));
60 mm->context.kvm_seq = seq; 60 mm->context.vmalloc_seq = seq;
61 } while (seq != init_mm.context.kvm_seq); 61 } while (seq != init_mm.context.vmalloc_seq);
62} 62}
63 63
64#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) 64#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
89 if (!pmd_none(pmd)) { 89 if (!pmd_none(pmd)) {
90 /* 90 /*
91 * Clear the PMD from the page table, and 91 * Clear the PMD from the page table, and
92 * increment the kvm sequence so others 92 * increment the vmalloc sequence so others
93 * notice this change. 93 * notice this change.
94 * 94 *
95 * Note: this is still racy on SMP machines. 95 * Note: this is still racy on SMP machines.
96 */ 96 */
97 pmd_clear(pmdp); 97 pmd_clear(pmdp);
98 init_mm.context.kvm_seq++; 98 init_mm.context.vmalloc_seq++;
99 99
100 /* 100 /*
101 * Free the page table, if there was one. 101 * Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
112 * Ensure that the active_mm is up to date - we want to 112 * Ensure that the active_mm is up to date - we want to
113 * catch any use-after-iounmap cases. 113 * catch any use-after-iounmap cases.
114 */ 114 */
115 if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq) 115 if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
116 __check_kvm_seq(current->active_mm); 116 __check_vmalloc_seq(current->active_mm);
117 117
118 flush_tlb_kernel_range(virt, end); 118 flush_tlb_kernel_range(virt, end);
119} 119}
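Beyond the mechanical rename (the old kvm_ prefix apparently stood for "kernel virtual mappings" and predates, and has nothing to do with, KVM the hypervisor), the counter implements a small retry-until-stable protocol: unmap_area_sections() bumps init_mm's counter after clearing a PMD, and every other mm recopies the kernel PGD slots until it observes an unchanged value. A generic restatement with hypothetical names:

    /* Pseudo-C sketch of the retry protocol; names are illustrative. */
    extern volatile unsigned int master_seq;  /* writer-owned counter */

    static void resync_kernel_pgd(void)
    {
        /* copy init_mm's vmalloc-range PGD slots into this mm */
    }

    unsigned int sync_vmalloc_mappings(void)
    {
        unsigned int seq;
        do {
            seq = master_seq;            /* sample before copying       */
            resync_kernel_pgd();
        } while (seq != master_seq);     /* writer raced us: copy again */
        return seq;                      /* stored as this mm's seq     */
    }

The in-diff comment concedes this is still racy on SMP, which is why check_and_switch_context() in context.c above re-checks vmalloc_seq on every context switch as a backstop.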
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 941dfb9e9a7..99b47b950ef 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -488,7 +488,7 @@ static void __init build_mem_type_table(void)
488#endif 488#endif
489 489
490 for (i = 0; i < 16; i++) { 490 for (i = 0; i < 16; i++) {
491 unsigned long v = pgprot_val(protection_map[i]); 491 pteval_t v = pgprot_val(protection_map[i]);
492 protection_map[i] = __pgprot(v | user_pgprot); 492 protection_map[i] = __pgprot(v | user_pgprot);
493 } 493 }
494 494
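A one-word fix with a real effect under LPAE: page table entries there are 64-bit, so accumulating pgprot_val() in an unsigned long (32-bit on ARM) would truncate software bits held above bit 31, such as L_PTE_NONE at bit 57 in the proc-v7-3level.S hunk below. A hedged sketch of the type split (the in-tree typedefs live in the pgtable-*level headers and may be arranged differently):

    #ifdef CONFIG_ARM_LPAE
    typedef u64 pteval_t;   /* 3-level descriptors; pte bits can sit above 31 */
    #else
    typedef u32 pteval_t;   /* classic 2-level descriptors */
    #endif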
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index b29a2265af0..eb6aa73bc8b 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -167,6 +167,10 @@
167 tst r1, #L_PTE_YOUNG 167 tst r1, #L_PTE_YOUNG
168 tstne r1, #L_PTE_PRESENT 168 tstne r1, #L_PTE_PRESENT
169 moveq r3, #0 169 moveq r3, #0
170#ifndef CONFIG_CPU_USE_DOMAINS
171 tstne r1, #L_PTE_NONE
172 movne r3, #0
173#endif
170 174
171 str r3, [r0] 175 str r3, [r0]
172 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 176 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
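The ARMv6-class set_pte_ext path learns about PROT_NONE: when domains are not in use, a Linux PTE carrying L_PTE_NONE must still fault on every access, so the hardware PTE written through r0 is zeroed while the Linux copy keeps its bits. Restated in C as a sketch (the local names are hypothetical):

    if (!(lpte & L_PTE_YOUNG) || !(lpte & L_PTE_PRESENT))
        hwpte = 0;                      /* inaccessible: not young/present  */
    else if (!IS_ENABLED(CONFIG_CPU_USE_DOMAINS) && (lpte & L_PTE_NONE))
        hwpte = 0;                      /* PROT_NONE: force accesses to fault */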
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index fd045e70639..6d98c13ab82 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -100,7 +100,11 @@ ENTRY(cpu_v7_set_pte_ext)
100 orrne r3, r3, #PTE_EXT_XN 100 orrne r3, r3, #PTE_EXT_XN
101 101
102 tst r1, #L_PTE_YOUNG 102 tst r1, #L_PTE_YOUNG
103 tstne r1, #L_PTE_PRESENT 103 tstne r1, #L_PTE_VALID
104#ifndef CONFIG_CPU_USE_DOMAINS
105 eorne r1, r1, #L_PTE_NONE
106 tstne r1, #L_PTE_NONE
107#endif
104 moveq r3, #0 108 moveq r3, #0
105 109
106 ARM( str r3, [r0, #2048]! ) 110 ARM( str r3, [r0, #2048]! )
@@ -161,11 +165,11 @@ ENDPROC(cpu_v7_set_pte_ext)
161 * TFR EV X F I D LR S 165 * TFR EV X F I D LR S
162 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM 166 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
163 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced 167 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
164 * 1 0 110 0011 1100 .111 1101 < we want 168 * 01 0 110 0011 1100 .111 1101 < we want
165 */ 169 */
166 .align 2 170 .align 2
167 .type v7_crval, #object 171 .type v7_crval, #object
168v7_crval: 172v7_crval:
169 crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c 173 crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
170 174
171 .previous 175 .previous
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 8de0f1dd154..7b56386f949 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -65,8 +65,11 @@ ENDPROC(cpu_v7_switch_mm)
65 */ 65 */
66ENTRY(cpu_v7_set_pte_ext) 66ENTRY(cpu_v7_set_pte_ext)
67#ifdef CONFIG_MMU 67#ifdef CONFIG_MMU
68 tst r2, #L_PTE_PRESENT 68 tst r2, #L_PTE_VALID
69 beq 1f 69 beq 1f
70 tst r3, #1 << (57 - 32) @ L_PTE_NONE
71 bicne r2, #L_PTE_VALID
72 bne 1f
70 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY 73 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
71 orreq r2, #L_PTE_RDONLY 74 orreq r2, #L_PTE_RDONLY
721: strd r2, r3, [r0] 751: strd r2, r3, [r0]
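The LPAE variant implements the same PROT_NONE policy differently: L_PTE_NONE sits at bit 57, in the upper word held in r3, so instead of zeroing the whole descriptor the code clears L_PTE_VALID (bit 0) before the strd, leaving the rest of the pte intact and letting the kernel distinguish PROT_NONE from not-present. (The two-level hunk above reaches the same decision with its eor/tst pair: if L_PTE_NONE was set, the eor clears it, the tst then sees zero, and the existing moveq zeroes the hardware PTE.) Both hunks also switch the visibility test from L_PTE_PRESENT to the new L_PTE_VALID bit, decoupling "present to Linux" from "valid to the MMU". In C terms, roughly:

    if (pte_high & (1ULL << (57 - 32)))   /* L_PTE_NONE, upper word       */
        pte_low &= ~L_PTE_VALID;          /* bit 0 clear: MMU faults here */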