Diffstat (limited to 'arch/arm/include/asm')
 arch/arm/include/asm/Kbuild           |  1
 arch/arm/include/asm/cpu.h            |  1
 arch/arm/include/asm/cputype.h        | 13
 arch/arm/include/asm/cti.h            | 20
 arch/arm/include/asm/hw_breakpoint.h  |  8
 arch/arm/include/asm/mmu.h            | 13
 arch/arm/include/asm/mmu_context.h    | 88
 arch/arm/include/asm/percpu.h (new)   | 45
 arch/arm/include/asm/perf_event.h     |  7
 arch/arm/include/asm/pgtable-2level.h |  2
 arch/arm/include/asm/pgtable-3level.h |  4
 arch/arm/include/asm/pgtable.h        | 10
 arch/arm/include/asm/pmu.h            | 28
 arch/arm/include/asm/prom.h           |  2
 arch/arm/include/asm/smp_plat.h       | 17
 15 files changed, 123 insertions(+), 136 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index f70ae175a3d6..2ffdaacd461c 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local64.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
-generic-y += percpu.h
 generic-y += poll.h
 generic-y += resource.h
 generic-y += sections.h
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index d797223b39d5..2744f0602550 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -15,6 +15,7 @@
 
 struct cpuinfo_arm {
 	struct cpu	cpu;
+	u32		cpuid;
 #ifdef CONFIG_SMP
 	unsigned int	loops_per_jiffy;
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb47d28cbe1f..a59dcb5ab5fc 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -25,6 +25,19 @@
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
 
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+#define MPIDR_HWID_BITMASK 0xFFFFFF
+
+#define MPIDR_LEVEL_BITS 8
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
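
The block above gives each MPIDR affinity field its own accessor. A minimal host-side sketch of how MPIDR_AFFINITY_LEVEL decomposes the 24-bit hardware ID (the macros are copied locally, and the MPIDR value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Local copies of the new macros, for illustration only. */
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)

int main(void)
{
	/* A made-up MPIDR value: cluster 1, core 2. */
	uint32_t mpidr = (1 << 8) | 2;

	printf("Aff0 (cpu)    : %u\n", (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 0)); /* 2 */
	printf("Aff1 (cluster): %u\n", (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 1)); /* 1 */
	printf("Aff2          : %u\n", (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 2)); /* 0 */
	return 0;
}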
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index a0ada3ea4358..f2e5cad3f306 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (val & 1) {
-		val = LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(LOCKCODE, cti->base + LOCKACCESS);
 }
 
 /**
@@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (!(val & 1)) {
-		val = ~LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
 }
 #endif
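
The simplification works because of how a CoreSight lock-access register behaves: writing the key unlocks it and writing anything else locks it, regardless of the current state, so the read-back of LOCKSTATUS bought nothing. A toy model of that idempotence (the key value here is illustrative, not the header's LOCKCODE):

#include <stdio.h>
#include <stdint.h>

#define LOCKCODE 0xC5ACCE55u  /* illustrative key value */

/* Toy model of a lock-access register: writing the key unlocks,
 * writing anything else locks -- independent of current state. */
static int locked = 1;

static void lockaccess_write(uint32_t val)
{
	locked = (val != LOCKCODE);
}

int main(void)
{
	lockaccess_write(LOCKCODE);      /* unlock */
	lockaccess_write(LOCKCODE);      /* idempotent: still unlocked */
	printf("locked = %d\n", locked); /* 0 */
	lockaccess_write(~LOCKCODE);     /* any non-key value locks */
	printf("locked = %d\n", locked); /* 1 */
	return 0;
}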
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index c190bc992f0e..01169dd723f1 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_BASE_WCR		112
 
 /* Accessor macros for the debug registers. */
-#define ARM_DBG_READ(M, OP2, VAL) do {\
-	asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
+#define ARM_DBG_READ(N, M, OP2, VAL) do {\
+	asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
 } while (0)
 
-#define ARM_DBG_WRITE(M, OP2, VAL) do {\
-	asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
+#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
+	asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)
 
 struct notifier_block;
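
The extra N parameter stops hard-wiring CRn to c0, so callers can name debug registers outside c0. The string building is plain token pasting, which a host-side sketch can show (DBG_INSN is a stand-in name for the macro's string-assembly part):

#include <stdio.h>

/* String pasting only -- mirrors how the updated macro builds the
 * mrc operand list from its N/M/OP2 parameters. */
#define DBG_INSN(N, M, OP2) "mrc p14, 0, %0, " #N "," #M ", " #OP2

int main(void)
{
	/* With the extra N argument, CRn is no longer fixed at c0: */
	puts(DBG_INSN(c0, c0, 0));  /* mrc p14, 0, %0, c0,c0, 0 */
	puts(DBG_INSN(c1, c0, 4));  /* mrc p14, 0, %0, c1,c0, 4 */
	return 0;
}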
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 14965658a923..9f77e7804f3b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,18 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	unsigned int id;
-	raw_spinlock_t id_lock;
+	u64 id;
 #endif
-	unsigned int kvm_seq;
+	unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
-#define ASID(mm)	((mm)->context.id & 255)
-
-/* init_mm.context.id_lock should be initialized. */
-#define INIT_MM_CONTEXT(name)	\
-	.context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+#define ASID_BITS	8
+#define ASID_MASK	((~0ULL) << ASID_BITS)
+#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
 #else
 #define ASID(mm)	(0)
 #endif
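
The context id becomes a 64-bit value: the low ASID_BITS hold the hardware ASID and the bits above act as a generation (rollover) counter, which is why ASID_MASK is now built from ~0ULL. A standalone sketch of the split (macros copied locally; the helper takes a raw id rather than an mm, and the example value is made up):

#include <stdio.h>
#include <stdint.h>

#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
#define ASID(id)  ((id) & ~ASID_MASK)

int main(void)
{
	/* Example 64-bit context id: generation 3, ASID 0x2a. */
	uint64_t id = (3ULL << ASID_BITS) | 0x2a;

	printf("ASID       = 0x%llx\n", (unsigned long long)ASID(id));  /* 0x2a */
	printf("generation = %llu\n",
	       (unsigned long long)((id & ASID_MASK) >> ASID_BITS));    /* 3 */
	return 0;
}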
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0d..e1f644bc7cc5 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,88 +20,12 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |              context ID             |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS	8
-#define ASID_MASK	((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION	(1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-	unsigned long flags;
-
-	__new_context(mm);
-
-	local_irq_save(flags);
-	cpu_switch_mm(mm->pgd, mm);
-	local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-					    struct task_struct *tsk)
-{
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
-
-	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
-	 */
-	cpu_set_reserved_ttbr0();
-
-	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		/*
-		 * The ASID is from the current generation, just switch to the
-		 * new pgd. This condition is only true for calls from
-		 * context_switch() and interrupts are already disabled.
-		 */
-		cpu_switch_mm(mm->pgd, mm);
-	else if (irqs_disabled())
-		/*
-		 * Defer the new ASID allocation until after the context
-		 * switch critical region since __new_context() cannot be
-		 * called with interrupts disabled (it sends IPIs).
-		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-	else
-		/*
-		 * That is a direct call to switch_mm() or activate_mm() with
-		 * interrupts enabled and a new context.
-		 */
-		switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-	finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-		switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -110,8 +34,8 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
+#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 #endif
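
The inline allocator is gone; check_and_switch_context() is now out of line, but the generation test it relied on -- same generation iff the bits above ASID_BITS match -- is worth seeing in isolation. A standalone sketch of that check, lifted from the removed code (values made up):

#include <stdio.h>

#define ASID_BITS 8

/* Models the generation test from the removed inline version: the ASID
 * is current iff the bits above ASID_BITS match the last allocation. */
static int same_generation(unsigned int id, unsigned int cpu_last_asid)
{
	return !((id ^ cpu_last_asid) >> ASID_BITS);
}

int main(void)
{
	printf("%d\n", same_generation(0x102, 0x1ff)); /* 1: both generation 1 */
	printf("%d\n", same_generation(0x102, 0x203)); /* 0: rollover happened */
	return 0;
}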
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
new file mode 100644
index 000000000000..968c0a14e0a3
--- /dev/null
+++ b/arch/arm/include/asm/percpu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_ARM_PERCPU_H_
+#define _ASM_ARM_PERCPU_H_
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
+ */
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+static inline void set_my_cpu_offset(unsigned long off)
+{
+	/* Set TPIDRPRW */
+	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long off;
+	/* Read TPIDRPRW */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+#define set_my_cpu_offset(x)	do {} while(0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_ARM_PERCPU_H_ */
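
On V6K/V7 this turns every this-cpu access into a single register read instead of a lookup keyed by smp_processor_id(). A host-side model of the idea, with a global standing in for TPIDRPRW (names and layout are illustrative):

#include <stdio.h>

/* A global stands in for TPIDRPRW: the kernel stashes this cpu's
 * per-cpu offset there and adds it to the per-cpu base on access. */
static unsigned long fake_tpidrprw;

static void set_my_cpu_offset(unsigned long off) { fake_tpidrprw = off; }
static unsigned long my_cpu_offset(void) { return fake_tpidrprw; }

static long counters[4];               /* one slot per "cpu" */

int main(void)
{
	set_my_cpu_offset(2 * sizeof(long));  /* pretend we are cpu 2 */

	/* per-cpu access = base address + this cpu's offset */
	long *p = (long *)((char *)counters + my_cpu_offset());
	*p += 1;

	printf("counters[2] = %ld\n", counters[2]);  /* 1 */
	return 0;
}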
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 625cd621a436..755877527cf9 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -21,4 +21,11 @@
 #define C(_x)			PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED	0xFFFF
 
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
+
 #endif /* __ARM_PERF_EVENT_H__ */
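
The self-referential #define looks odd but is the standard perf idiom: the generic code wraps its fallback in #ifndef perf_misc_flags, so defining the macro -- even as a call to the function of the same name -- is what switches the arch override on. A host-side sketch of the mechanism (flag values made up):

#include <stdio.h>

/* "Arch header": a real function plus a self-referential define that
 * marks the override, mirroring the idiom added above. */
static unsigned long perf_misc_flags(void *regs) { (void)regs; return 0x2; }
#define perf_misc_flags(regs) perf_misc_flags(regs)

/* "Generic code": supplies a fallback only when no arch define exists. */
#ifndef perf_misc_flags
#define perf_misc_flags(regs) 0x1
#endif

int main(void)
{
	printf("flags = 0x%lx\n", perf_misc_flags(NULL)); /* 0x2: arch version */
	return 0;
}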
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 2317a71c8f8e..f97ee02386ee 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -115,6 +115,7 @@
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
 #define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
@@ -123,6 +124,7 @@
 #define L_PTE_USER		(_AT(pteval_t, 1) << 8)
 #define L_PTE_XN		(_AT(pteval_t, 1) << 9)
 #define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 11)
 
 /*
  * These are the memory types, defined to be compatible with
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index b24903549d1c..a3f37929940a 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -67,7 +67,8 @@
  * These bits overlap with the hardware bits but the naming is preserved for
  * consistency with the classic page table format.
  */
-#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
+#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
@@ -76,6 +77,7 @@
 #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
 #define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
 #define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
 
 /*
  * To be used in assembly code with the upper page attributes.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 08c12312a1f9..9c82f988c0e3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -73,7 +73,7 @@ extern pgprot_t pgprot_kernel;
 
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
 #define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
 #define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
 #define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -83,7 +83,7 @@ extern pgprot_t pgprot_kernel;
 #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC	pgprot_kernel
 
-#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
 #define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
 #define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte) \
-	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-	 (L_PTE_PRESENT | L_PTE_USER))
+#define pte_present_user(pte)	(pte_present(pte) && (pte_val(pte) & L_PTE_USER))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -242,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
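
Threading L_PTE_NONE through pte_modify() is what makes the new bit stick: protection changes rewrite exactly the mask bits, so PROT_NONE must be in the mask or mprotect() would silently drop it. A standalone sketch using the 2-level bit positions (the prot values are simplified stand-ins for PAGE_NONE/PAGE_SHARED):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

/* Bit positions taken from the 2-level variant above. */
#define L_PTE_PRESENT (1ULL << 0)
#define L_PTE_RDONLY  (1ULL << 7)
#define L_PTE_USER    (1ULL << 8)
#define L_PTE_XN      (1ULL << 9)
#define L_PTE_NONE    (1ULL << 11)

/* pte_modify() as above: L_PTE_NONE travels with the protection bits. */
static pteval_t pte_modify(pteval_t pte, pteval_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
	return (pte & ~mask) | (newprot & mask);
}

int main(void)
{
	pteval_t pte = L_PTE_PRESENT | L_PTE_USER;                  /* shared-ish */
	pteval_t prot_none = L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE; /* PAGE_NONE-ish */

	pte = pte_modify(pte, prot_none);
	printf("PROT_NONE set: %d\n", !!(pte & L_PTE_NONE));    /* 1 */
	printf("still present: %d\n", !!(pte & L_PTE_PRESENT)); /* 1 */
	return 0;
}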
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index a26170dce02e..f24edad26c70 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -67,19 +67,19 @@ struct arm_pmu {
 	cpumask_t	active_irqs;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
-	void		(*disable)(struct hw_perf_event *evt, int idx);
+	void		(*enable)(struct perf_event *event);
+	void		(*disable)(struct perf_event *event);
 	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
-					 struct hw_perf_event *hwc);
+					 struct perf_event *event);
 	int		(*set_event_filter)(struct hw_perf_event *evt,
 					    struct perf_event_attr *attr);
-	u32		(*read_counter)(int idx);
-	void		(*write_counter)(int idx, u32 val);
-	void		(*start)(void);
-	void		(*stop)(void);
+	u32		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u32 val);
+	void		(*start)(struct arm_pmu *);
+	void		(*stop)(struct arm_pmu *);
 	void		(*reset)(void *);
-	int		(*request_irq)(irq_handler_t handler);
-	void		(*free_irq)(void);
+	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
+	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
@@ -93,15 +93,11 @@ struct arm_pmu {
 
 extern const struct dev_pm_ops armpmu_dev_pm_ops;
 
-int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+int armpmu_register(struct arm_pmu *armpmu, int type);
 
-u64 armpmu_event_update(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx);
+u64 armpmu_event_update(struct perf_event *event);
 
-int armpmu_event_set_period(struct perf_event *event,
-			    struct hw_perf_event *hwc,
-			    int idx);
+int armpmu_event_set_period(struct perf_event *event);
 
 int armpmu_map_event(struct perf_event *event,
 		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
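
The reworked callbacks take the perf_event itself; implementations recover the hw_perf_event and counter index from it instead of having both passed alongside. A sketch of the new shape with stub types (everything here is a stand-in for the kernel structures):

#include <stdio.h>

/* Stub types standing in for the kernel's -- illustration only. */
struct hw_perf_event { int idx; };
struct perf_event    { struct hw_perf_event hw; };

/* New-style callback: takes the event, derives hwc and idx itself,
 * instead of the old (struct hw_perf_event *evt, int idx) pair. */
static unsigned int read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	printf("reading counter %d\n", hwc->idx);
	return 0;
}

int main(void)
{
	struct perf_event ev = { .hw = { .idx = 1 } };

	read_counter(&ev);
	return 0;
}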
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index aeae9c609df4..8dd51dc1a367 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -15,6 +15,7 @@
 
 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
+extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
@@ -24,6 +25,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 }
 
 static inline void arm_dt_memblock_reserve(void) { }
+static inline void arm_dt_init_cpu_maps(void) { }
 
 #endif /* CONFIG_OF */
 #endif /* ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 558d6c80aca9..aaa61b6f50ff 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -5,6 +5,9 @@
 #ifndef __ASMARM_SMP_PLAT_H
 #define __ASMARM_SMP_PLAT_H
 
+#include <linux/cpumask.h>
+#include <linux/err.h>
+
 #include <asm/cputype.h>
 
 /*
@@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void)
  */
 extern int __cpu_logical_map[];
 #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ *  - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+	int cpu;
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+		if (cpu_logical_map(cpu) == mpidr)
+			return cpu;
+	return -EINVAL;
+}
 
 #endif
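
get_logical_index() is the inverse of cpu_logical_map(): it scans the map for a matching MPIDR[23:0]. A host-side sketch with stand-ins for the kernel symbols (the map contents are made up):

#include <stdio.h>

/* Stand-ins for the kernel symbols used by the new helper. */
#define EINVAL 22
static int nr_cpu_ids = 4;
static int __cpu_logical_map[4] = { 0x000, 0x001, 0x100, 0x101 };
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]

/* Same loop as the header's helper, in host-compilable form. */
static int get_logical_index(unsigned int mpidr)
{
	int cpu;
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (cpu_logical_map(cpu) == (int)mpidr)
			return cpu;
	return -EINVAL;
}

int main(void)
{
	printf("0x100 -> cpu %d\n", get_logical_index(0x100)); /* 2 */
	printf("0x300 -> %d\n", get_logical_index(0x300));     /* -22 (-EINVAL) */
	return 0;
}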