Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/Kbuild                 |  1
-rw-r--r--  arch/arm/include/asm/assembler.h            |  8
-rw-r--r--  arch/arm/include/asm/cpu.h                  |  1
-rw-r--r--  arch/arm/include/asm/cputype.h              | 13
-rw-r--r--  arch/arm/include/asm/cti.h                  | 20
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h  |  5
-rw-r--r--  arch/arm/include/asm/hardware/vic.h         |  2
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h        |  8
-rw-r--r--  arch/arm/include/asm/mach/serial_at91.h     | 33
-rw-r--r--  arch/arm/include/asm/mach/serial_sa1100.h   | 31
-rw-r--r--  arch/arm/include/asm/mach/udc_pxa2xx.h      | 26
-rw-r--r--  arch/arm/include/asm/mmu.h                  | 13
-rw-r--r--  arch/arm/include/asm/mmu_context.h          | 88
-rw-r--r--  arch/arm/include/asm/percpu.h               | 45
-rw-r--r--  arch/arm/include/asm/perf_event.h           |  7
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h       |  2
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h       |  4
-rw-r--r--  arch/arm/include/asm/pgtable.h              | 10
-rw-r--r--  arch/arm/include/asm/pmu.h                  | 28
-rw-r--r--  arch/arm/include/asm/prom.h                 |  2
-rw-r--r--  arch/arm/include/asm/smp.h                  |  1
-rw-r--r--  arch/arm/include/asm/smp_plat.h             | 17
-rw-r--r--  arch/arm/include/asm/syscall.h              |  9
-rw-r--r--  arch/arm/include/asm/thread_info.h          |  7
24 files changed, 151 insertions, 230 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 514e398f1a07..d3db39860b9c 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local64.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
-generic-y += percpu.h
 generic-y += poll.h
 generic-y += resource.h
 generic-y += sections.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 2ef95813fce0..eb87200aa4b5 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -250,6 +250,7 @@
  * Beware, it also clobers LR.
  */
 .macro safe_svcmode_maskall reg:req
+#if __LINUX_ARM_ARCH__ >= 6
 	mrs	\reg , cpsr
 	mov	lr , \reg
 	and	lr , lr , #MODE_MASK
@@ -266,6 +267,13 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 	__ERET
 1:	msr	cpsr_c, \reg
 2:
+#else
+/*
+ * workaround for possibly broken pre-v6 hardware
+ * (akita, Sharp Zaurus C-1000, PXA270-based)
+ */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
+#endif
 .endm
 
 /*
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index d797223b39d5..2744f0602550 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -15,6 +15,7 @@
 
 struct cpuinfo_arm {
 	struct cpu	cpu;
+	u32		cpuid;
 #ifdef CONFIG_SMP
 	unsigned int	loops_per_jiffy;
 #endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb47d28cbe1f..a59dcb5ab5fc 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -25,6 +25,19 @@
 #define CPUID_EXT_ISAR4		"c2, 4"
 #define CPUID_EXT_ISAR5		"c2, 5"
 
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+#define MPIDR_HWID_BITMASK 0xFFFFFF
+
+#define MPIDR_LEVEL_BITS 8
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
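
The MPIDR helpers added above treat each affinity level as an 8-bit field of MPIDR[23:0]. As a minimal user-space sketch (not part of the patch; the sample MPIDR value is hypothetical), MPIDR_AFFINITY_LEVEL() decomposes a value like this:

	#include <stdio.h>

	#define MPIDR_LEVEL_BITS 8
	#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)

	int main(void)
	{
		unsigned int mpidr = 0x80000102;	/* hypothetical: cluster 1, core 2 */

		/* prints Aff0=2 Aff1=1 Aff2=0 */
		printf("Aff0=%u Aff1=%u Aff2=%u\n",
		       MPIDR_AFFINITY_LEVEL(mpidr, 0),
		       MPIDR_AFFINITY_LEVEL(mpidr, 1),
		       MPIDR_AFFINITY_LEVEL(mpidr, 2));
		return 0;
	}
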
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index a0ada3ea4358..f2e5cad3f306 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -146,15 +146,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (val & 1) {
-		val = LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(LOCKCODE, cti->base + LOCKACCESS);
 }
 
 /**
@@ -166,14 +158,6 @@ static inline void cti_unlock(struct cti *cti)
  */
 static inline void cti_lock(struct cti *cti)
 {
-	void __iomem *base = cti->base;
-	unsigned long val;
-
-	val = __raw_readl(base + LOCKSTATUS);
-
-	if (!(val & 1)) {
-		val = ~LOCKCODE;
-		__raw_writel(val, base + LOCKACCESS);
-	}
+	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
 }
 #endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index c4c87bc12231..3b2c40b5bfa2 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -102,6 +102,10 @@
 
 #define L2X0_ADDR_FILTER_EN		1
 
+#define L2X0_CTRL_EN			1
+
+#define L2X0_WAY_SIZE_SHIFT		3
+
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
@@ -126,6 +130,7 @@ struct l2x0_regs {
 	unsigned long filter_end;
 	unsigned long prefetch_ctrl;
 	unsigned long pwr_ctrl;
+	unsigned long ctrl;
 };
 
 extern struct l2x0_regs l2x0_saved_regs;
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
index e14af1a1a320..2bebad36fc83 100644
--- a/arch/arm/include/asm/hardware/vic.h
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -47,7 +47,7 @@
 struct device_node;
 struct pt_regs;
 
-void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources,
+void __vic_init(void __iomem *base, int irq_start, u32 vic_sources,
 		u32 resume_sources, struct device_node *node);
 void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
 int vic_of_init(struct device_node *node, struct device_node *parent);
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index c190bc992f0e..01169dd723f1 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -98,12 +98,12 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_BASE_WCR		112
 
 /* Accessor macros for the debug registers. */
-#define ARM_DBG_READ(M, OP2, VAL) do {\
-	asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\
+#define ARM_DBG_READ(N, M, OP2, VAL) do {\
+	asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\
 } while (0)
 
-#define ARM_DBG_WRITE(M, OP2, VAL) do {\
-	asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\
+#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\
+	asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\
 } while (0)
 
 struct notifier_block;
diff --git a/arch/arm/include/asm/mach/serial_at91.h b/arch/arm/include/asm/mach/serial_at91.h
deleted file mode 100644
index ea6d063923b8..000000000000
--- a/arch/arm/include/asm/mach/serial_at91.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm/include/asm/mach/serial_at91.h
- *
- * Based on serial_sa1100.h by Nicolas Pitre
- *
- * Copyright (C) 2002 ATMEL Rousset
- *
- * Low level machine dependent UART functions.
- */
-
-struct uart_port;
-
-/*
- * This is a temporary structure for registering these
- * functions; it is intended to be discarded after boot.
- */
-struct atmel_port_fns {
-	void	(*set_mctrl)(struct uart_port *, u_int);
-	u_int	(*get_mctrl)(struct uart_port *);
-	void	(*enable_ms)(struct uart_port *);
-	void	(*pm)(struct uart_port *, u_int, u_int);
-	int	(*set_wake)(struct uart_port *, u_int);
-	int	(*open)(struct uart_port *);
-	void	(*close)(struct uart_port *);
-};
-
-#if defined(CONFIG_SERIAL_ATMEL)
-void atmel_register_uart_fns(struct atmel_port_fns *fns);
-#else
-#define atmel_register_uart_fns(fns) do { } while (0)
-#endif
-
-
diff --git a/arch/arm/include/asm/mach/serial_sa1100.h b/arch/arm/include/asm/mach/serial_sa1100.h
deleted file mode 100644
index d09064bf95a0..000000000000
--- a/arch/arm/include/asm/mach/serial_sa1100.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/arm/include/asm/mach/serial_sa1100.h
- *
- * Author: Nicolas Pitre
- *
- * Moved and changed lots, Russell King
- *
- * Low level machine dependent UART functions.
- */
-
-struct uart_port;
-struct uart_info;
-
-/*
- * This is a temporary structure for registering these
- * functions; it is intended to be discarded after boot.
- */
-struct sa1100_port_fns {
-	void	(*set_mctrl)(struct uart_port *, u_int);
-	u_int	(*get_mctrl)(struct uart_port *);
-	void	(*pm)(struct uart_port *, u_int, u_int);
-	int	(*set_wake)(struct uart_port *, u_int);
-};
-
-#ifdef CONFIG_SERIAL_SA1100
-void sa1100_register_uart_fns(struct sa1100_port_fns *fns);
-void sa1100_register_uart(int idx, int port);
-#else
-#define sa1100_register_uart_fns(fns) do { } while (0)
-#define sa1100_register_uart(idx,port) do { } while (0)
-#endif
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
deleted file mode 100644
index ea297ac70bc6..000000000000
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/include/asm/mach/udc_pxa2xx.h
- *
- * This supports machine-specific differences in how the PXA2xx
- * USB Device Controller (UDC) is wired.
- *
- * It is set in linux/arch/arm/mach-pxa/<machine>.c or in
- * linux/arch/mach-ixp4xx/<machine>.c and used in
- * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c
- */
-
-struct pxa2xx_udc_mach_info {
-	int	(*udc_is_connected)(void);	/* do we see host? */
-	void	(*udc_command)(int cmd);
-#define	PXA2XX_UDC_CMD_CONNECT		0	/* let host see us */
-#define	PXA2XX_UDC_CMD_DISCONNECT	1	/* so host won't see us */
-
-	/* Boards following the design guidelines in the developer's manual,
-	 * with on-chip GPIOs not Lubbock's weird hardware, can have a sane
-	 * VBUS IRQ and omit the methods above.  Store the GPIO number
-	 * here.  Note that sometimes the signals go through inverters...
-	 */
-	bool	gpio_pullup_inverted;
-	int	gpio_pullup;			/* high == pullup activated */
-};
-
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 14965658a923..9f77e7804f3b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,18 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-	unsigned int id;
-	raw_spinlock_t id_lock;
+	u64 id;
 #endif
-	unsigned int kvm_seq;
+	unsigned int	vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
-#define ASID(mm)	((mm)->context.id & 255)
-
-/* init_mm.context.id_lock should be initialized. */
-#define INIT_MM_CONTEXT(name)	\
-	.context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+#define ASID_BITS	8
+#define ASID_MASK	((~0ULL) << ASID_BITS)
+#define ASID(mm)	((mm)->context.id & ~ASID_MASK)
 #else
 #define ASID(mm)	(0)
 #endif
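
The mmu.h change above widens context.id to 64 bits so the bits above the 8-bit hardware ASID can carry a generation count. As a minimal sketch (not part of the patch; the sample id value is hypothetical), the new ASID_MASK/ASID() split reads like this:

	#include <stdio.h>
	#include <stdint.h>

	#define ASID_BITS 8
	#define ASID_MASK ((~0ULL) << ASID_BITS)

	int main(void)
	{
		uint64_t id = (3ULL << ASID_BITS) | 0x2a;	/* hypothetical: generation 3, ASID 0x2a */

		/* prints generation=3 asid=42 */
		printf("generation=%llu asid=%llu\n",
		       (unsigned long long)(id >> ASID_BITS),
		       (unsigned long long)(id & ~ASID_MASK));
		return 0;
	}
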
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc642c0d..e1f644bc7cc5 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,88 +20,12 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
-/*
- * On ARMv6, we have the following structure in the Context ID:
- *
- * 31                         7          0
- * +-------------------------+-----------+
- * |      process ID         |   ASID    |
- * +-------------------------+-----------+
- * |              context ID             |
- * +-------------------------------------+
- *
- * The ASID is used to tag entries in the CPU caches and TLBs.
- * The context ID is used by debuggers and trace logic, and
- * should be unique within all running processes.
- */
-#define ASID_BITS		8
-#define ASID_MASK		((~0) << ASID_BITS)
-#define ASID_FIRST_VERSION	(1 << ASID_BITS)
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-void cpu_set_reserved_ttbr0(void);
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-	unsigned long flags;
-
-	__new_context(mm);
-
-	local_irq_save(flags);
-	cpu_switch_mm(mm->pgd, mm);
-	local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
-					    struct task_struct *tsk)
-{
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
-
-	/*
-	 * Required during context switch to avoid speculative page table
-	 * walking with the wrong TTBR.
-	 */
-	cpu_set_reserved_ttbr0();
-
-	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		/*
-		 * The ASID is from the current generation, just switch to the
-		 * new pgd. This condition is only true for calls from
-		 * context_switch() and interrupts are already disabled.
-		 */
-		cpu_switch_mm(mm->pgd, mm);
-	else if (irqs_disabled())
-		/*
-		 * Defer the new ASID allocation until after the context
-		 * switch critical region since __new_context() cannot be
-		 * called with interrupts disabled (it sends IPIs).
-		 */
-		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-	else
-		/*
-		 * That is a direct call to switch_mm() or activate_mm() with
-		 * interrupts enabled and a new context.
-		 */
-		switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
-
-#define finish_arch_post_lock_switch \
-	finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
-		switch_new_context(current->mm);
-}
+void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+#define init_new_context(tsk,mm)	({ mm->context.id = 0; })
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -110,8 +34,8 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
@@ -143,6 +67,7 @@ static inline void finish_arch_post_lock_switch(void)
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
+#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -186,6 +111,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 #endif
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
new file mode 100644
index 000000000000..968c0a14e0a3
--- /dev/null
+++ b/arch/arm/include/asm/percpu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_ARM_PERCPU_H_
+#define _ASM_ARM_PERCPU_H_
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
+ */
+#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+static inline void set_my_cpu_offset(unsigned long off)
+{
+	/* Set TPIDRPRW */
+	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long off;
+	/* Read TPIDRPRW */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+#define set_my_cpu_offset(x)	do {} while(0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* _ASM_ARM_PERCPU_H_ */
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 625cd621a436..755877527cf9 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -21,4 +21,11 @@
 #define C(_x)				PERF_COUNT_HW_CACHE_##_x
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
+#ifdef CONFIG_HW_PERF_EVENTS
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
+
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 2317a71c8f8e..f97ee02386ee 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -115,6 +115,7 @@
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
 #define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
 #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
@@ -123,6 +124,7 @@
 #define L_PTE_USER		(_AT(pteval_t, 1) << 8)
 #define L_PTE_XN		(_AT(pteval_t, 1) << 9)
 #define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 11)
 
 /*
  * These are the memory types, defined to be compatible with
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index b24903549d1c..a3f37929940a 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -67,7 +67,8 @@
  * These bits overlap with the hardware bits but the naming is preserved for
  * consistency with the classic page table format.
  */
-#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
+#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
+#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
@@ -76,6 +77,7 @@
 #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
 #define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
 #define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
+#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
 
 /*
  * To be used in assembly code with the upper page attributes.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 08c12312a1f9..9c82f988c0e3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -73,7 +73,7 @@ extern pgprot_t pgprot_kernel;
 
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
 #define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
 #define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
 #define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -83,7 +83,7 @@ extern pgprot_t pgprot_kernel;
 #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
 #define PAGE_KERNEL_EXEC	pgprot_kernel
 
-#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
 #define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
 #define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
@@ -203,9 +203,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
-#define pte_present_user(pte) \
-	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-	 (L_PTE_PRESENT | L_PTE_USER))
+#define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -242,7 +240,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index a26170dce02e..f24edad26c70 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -67,19 +67,19 @@ struct arm_pmu {
 	cpumask_t	active_irqs;
 	char		*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
-	void		(*disable)(struct hw_perf_event *evt, int idx);
+	void		(*enable)(struct perf_event *event);
+	void		(*disable)(struct perf_event *event);
 	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
-					 struct hw_perf_event *hwc);
+					 struct perf_event *event);
 	int		(*set_event_filter)(struct hw_perf_event *evt,
 					    struct perf_event_attr *attr);
-	u32		(*read_counter)(int idx);
-	void		(*write_counter)(int idx, u32 val);
-	void		(*start)(void);
-	void		(*stop)(void);
+	u32		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u32 val);
+	void		(*start)(struct arm_pmu *);
+	void		(*stop)(struct arm_pmu *);
 	void		(*reset)(void *);
-	int		(*request_irq)(irq_handler_t handler);
-	void		(*free_irq)(void);
+	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
+	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
@@ -93,15 +93,11 @@ struct arm_pmu {
 
 extern const struct dev_pm_ops armpmu_dev_pm_ops;
 
-int armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+int armpmu_register(struct arm_pmu *armpmu, int type);
 
-u64 armpmu_event_update(struct perf_event *event,
-			struct hw_perf_event *hwc,
-			int idx);
+u64 armpmu_event_update(struct perf_event *event);
 
-int armpmu_event_set_period(struct perf_event *event,
-			    struct hw_perf_event *hwc,
-			    int idx);
+int armpmu_event_set_period(struct perf_event *event);
 
 int armpmu_map_event(struct perf_event *event,
 		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index 6d65ba222db9..a219227c3e43 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -17,6 +17,7 @@
 
 extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
+extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
@@ -26,6 +27,7 @@ static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 }
 
 static inline void arm_dt_memblock_reserve(void) { }
+static inline void arm_dt_init_cpu_maps(void) { }
 
 #endif /* CONFIG_OF */
 #endif /* ASMARM_PROM_H */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2e3be16c6766..d3a22bebe6ce 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -79,6 +79,7 @@ extern void cpu_die(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
 struct smp_operations {
 #ifdef CONFIG_SMP
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 558d6c80aca9..aaa61b6f50ff 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -5,6 +5,9 @@
 #ifndef __ASMARM_SMP_PLAT_H
 #define __ASMARM_SMP_PLAT_H
 
+#include <linux/cpumask.h>
+#include <linux/err.h>
+
 #include <asm/cputype.h>
 
 /*
@@ -48,5 +51,19 @@ static inline int cache_ops_need_broadcast(void)
  */
 extern int __cpu_logical_map[];
 #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ *  - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+	int cpu;
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+		if (cpu_logical_map(cpu) == mpidr)
+			return cpu;
+	return -EINVAL;
+}
 
 #endif
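
get_logical_index() added above is a plain reverse look-up over cpu_logical_map(). A self-contained sketch of the same loop (the NR_CPUS value and map contents are hypothetical, and -1 stands in for the kernel's -EINVAL):

	#include <stdio.h>

	#define NR_CPUS 4

	static const unsigned int cpu_logical_map[NR_CPUS] = { 0x000, 0x001, 0x100, 0x101 };

	static int get_logical_index(unsigned int mpidr)
	{
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_logical_map[cpu] == mpidr)
				return cpu;
		return -1;	/* kernel version returns -EINVAL */
	}

	int main(void)
	{
		printf("MPIDR 0x100 -> logical cpu %d\n", get_logical_index(0x100));	/* prints 2 */
		printf("MPIDR 0x203 -> logical cpu %d\n", get_logical_index(0x203));	/* prints -1 */
		return 0;
	}
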
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 9fdded6b1089..f1d96d4e8092 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -7,6 +7,8 @@
 #ifndef _ASM_ARM_SYSCALL_H
 #define _ASM_ARM_SYSCALL_H
 
+#include <linux/audit.h> /* for AUDIT_ARCH_* */
+#include <linux/elf.h> /* for ELF_EM */
 #include <linux/err.h>
 #include <linux/sched.h>
 
@@ -95,4 +97,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
 	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
 }
 
+static inline int syscall_get_arch(struct task_struct *task,
+				   struct pt_regs *regs)
+{
+	/* ARM tasks don't change audit architectures on the fly. */
+	return AUDIT_ARCH_ARM;
+}
+
 #endif /* _ASM_ARM_SYSCALL_H */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 8477b4c1d39f..cddda1f41f0f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -151,10 +151,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
+#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
-#define TIF_SECCOMP		21
 #define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -163,11 +163,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
-#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
 
 /*
  * Change these and you break ASM code in entry-common.S