author    Russell King <rmk+kernel@arm.linux.org.uk>  2011-09-16 16:45:16 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2011-09-16 16:45:16 -0400
commit    4722cd7741c6404f967f7a7b8b666540b6c1663e (patch)
tree      877b7d8efe1e4e4ce48416186b4f45da3a5fccac /arch
parent    1db3706b05b11abcf2673ffbed5ad43b4c90ed11 (diff)
parent    4fb0d2ea397ab207fdecbd88ad0e37b36ce68a62 (diff)
Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6-wd into devel-stable
Conflicts:
	arch/arm/mach-imx/mach-cpuimx27.c
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 12
-rw-r--r--  arch/arm/boot/compressed/mmcif-sh7372.c | 2
-rw-r--r--  arch/arm/boot/compressed/sdhi-sh7372.c | 2
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h | 2
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 2
-rw-r--r--  arch/arm/include/asm/pmu.h | 97
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 270
-rw-r--r--  arch/arm/kernel/perf_event.c | 475
-rw-r--r--  arch/arm/kernel/perf_event_v6.c | 87
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 395
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 90
-rw-r--r--  arch/arm/kernel/pmu.c | 186
-rw-r--r--  arch/arm/kernel/relocate_kernel.S | 3
-rw-r--r--  arch/arm/kernel/setup.c | 15
-rw-r--r--  arch/arm/kernel/smp_twd.c | 4
-rw-r--r--  arch/arm/mach-at91/at91sam9261.c | 2
-rw-r--r--  arch/arm/mach-ep93xx/include/mach/ts72xx.h | 26
-rw-r--r--  arch/arm/mach-exynos4/clock.c | 2
-rw-r--r--  arch/arm/mach-exynos4/cpu.c | 11
-rw-r--r--  arch/arm/mach-exynos4/include/mach/irqs.h | 5
-rw-r--r--  arch/arm/mach-exynos4/include/mach/regs-pmu.h | 2
-rw-r--r--  arch/arm/mach-exynos4/irq-eint.c | 7
-rw-r--r--  arch/arm/mach-exynos4/mach-universal_c210.c | 4
-rw-r--r--  arch/arm/mach-exynos4/setup-usb-phy.c | 2
-rw-r--r--  arch/arm/mach-footbridge/Kconfig | 1
-rw-r--r--  arch/arm/mach-footbridge/dc21285.c | 1
-rw-r--r--  arch/arm/mach-imx/mach-cpuimx27.c | 2
-rw-r--r--  arch/arm/mach-imx/mach-cpuimx35.c | 2
-rw-r--r--  arch/arm/mach-imx/mach-eukrea_cpuimx25.c | 2
-rw-r--r--  arch/arm/mach-orion5x/dns323-setup.c | 2
-rw-r--r--  arch/arm/mach-orion5x/pci.c | 1
-rw-r--r--  arch/arm/mach-realview/include/mach/system.h | 1
-rw-r--r--  arch/arm/mach-s3c64xx/pm.c | 1
-rw-r--r--  arch/arm/mach-s5p64x0/irq-eint.c | 2
-rw-r--r--  arch/arm/mach-s5pv210/pm.c | 2
-rw-r--r--  arch/arm/mach-shmobile/board-ag5evm.c | 3
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 4
-rw-r--r--  arch/arm/mach-shmobile/clock-sh7372.c | 29
-rw-r--r--  arch/arm/mach-shmobile/clock-sh73a0.c | 2
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sh7372.h | 4
-rw-r--r--  arch/arm/mach-shmobile/intc-sh7372.c | 7
-rw-r--r--  arch/arm/mach-shmobile/setup-sh7372.c | 176
-rw-r--r--  arch/arm/mach-vexpress/v2m.c | 7
-rw-r--r--  arch/arm/mm/proc-arm920.S | 2
-rw-r--r--  arch/arm/mm/proc-arm926.S | 2
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 10
-rw-r--r--  arch/arm/mm/proc-v6.S | 16
-rw-r--r--  arch/arm/mm/proc-v7.S | 6
-rw-r--r--  arch/arm/mm/proc-xsc3.S | 6
-rw-r--r--  arch/arm/plat-s5p/clock.c | 2
-rw-r--r--  arch/arm/plat-s5p/irq-gpioint.c | 6
-rw-r--r--  arch/arm/plat-samsung/include/plat/backlight.h | 2
-rw-r--r--  arch/arm/plat-samsung/irq-vic-timer.c | 5
-rw-r--r--  arch/arm/tools/mach-types | 6
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 2
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 2
-rw-r--r--  arch/sh/include/asm/ptrace.h | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7757.c | 1
-rw-r--r--  arch/sh/kernel/idle.c | 2
-rw-r--r--  arch/sh/kernel/traps_32.c | 37
-rw-r--r--  arch/sparc/kernel/irq.h | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 2

62 files changed, 1190 insertions(+), 875 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 73c320ea172c..5a3a78633177 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1271,6 +1271,18 @@ config ARM_ERRATA_754327
 	  This workaround defines cpu_relax() as smp_mb(), preventing correctly
 	  written polling loops from denying visibility of updates to memory.
 
+config ARM_ERRATA_364296
+	bool "ARM errata: Possible cache data corruption with hit-under-miss enabled"
+	depends on CPU_V6 && !SMP
+	help
+	  This option enables the workaround for the 364296 ARM1136
+	  r0p2 erratum (possible cache data corruption with
+	  hit-under-miss enabled). It sets the undocumented bit 31 in
+	  the auxiliary control register and the FI bit in the control
+	  register, thus disabling hit-under-miss without putting the
+	  processor into full low interrupt latency mode. ARM11MPCore
+	  is not affected.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
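For reference, the workaround described in the help text amounts to a read-modify-write of two CP15 registers during CPU setup (the actual change lands in arch/arm/mm/proc-v6.S, per the diffstat above). A hedged sketch in C with inline assembly, assuming the bit positions named in the help text (auxiliary control bit 31; the FI bit, which is bit 21 of the control register on ARM1136):

	/* Sketch only: ARM_ERRATA_364296 workaround for ARM1136 r0p2. */
	unsigned long val;

	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
	val |= 1UL << 31;	/* undocumented bit 31: disable hit-under-miss */
	asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (val));

	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (val));
	val |= 1UL << 21;	/* FI bit in the control register */
	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));

The combination disables hit-under-miss without dropping the core into full low interrupt latency mode.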
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c
index b6f61d9a5a1b..672ae95db5c3 100644
--- a/arch/arm/boot/compressed/mmcif-sh7372.c
+++ b/arch/arm/boot/compressed/mmcif-sh7372.c
@@ -82,7 +82,7 @@ asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
 
 
 	/* Disable clock to MMC hardware block */
-	__raw_writel(__raw_readl(SMSTPCR3) & (1 << 12), SMSTPCR3);
+	__raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
 
 	mmc_update_progress(MMC_PROGRESS_DONE);
 }
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c
index d403a8b24d7f..d279294f2381 100644
--- a/arch/arm/boot/compressed/sdhi-sh7372.c
+++ b/arch/arm/boot/compressed/sdhi-sh7372.c
@@ -85,7 +85,7 @@ asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
 		goto err;
 
 	/* Disable clock to SDHI1 hardware block */
-	__raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3);
+	__raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3);
 
 	mmc_update_progress(MMC_PROGRESS_DONE);
 
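Both hunks above fix the same bug: writing back readl & (1 << n) leaves bit n unchanged and clears every other module-stop bit on SMSTPCR3, un-gating unrelated hardware instead of stopping the target clock. The intended read-modify-write pattern, as a sketch with hypothetical helpers (SMSTPCR bits are module-stop bits: 1 stops the clock, 0 feeds it):

	static void mstp_clock_disable(void __iomem *smstpcr, int bit)
	{
		/* set the module-stop bit, preserving all the others */
		__raw_writel(__raw_readl(smstpcr) | (1 << bit), smstpcr);
	}

	static void mstp_clock_enable(void __iomem *smstpcr, int bit)
	{
		/* clear the module-stop bit to run the clock again */
		__raw_writel(__raw_readl(smstpcr) & ~(1 << bit), smstpcr);
	}

Here bit 12 gates the MMCIF block and bit 13 gates SDHI1, as in the two hunks.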
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 16bd48031583..bfa706ffd968 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -64,7 +64,7 @@
 #define L2X0_AUX_CTRL_MASK			0xc0000fff
 #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT	16
 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT		17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x3 << 17)
+#define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x7 << 17)
 #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT	22
 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT		26
 #define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT		27
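The way-size field occupies three bits, [19:17], of the PL310 auxiliary control register, so the old two-bit mask silently truncated any field value above 3. A decode sketch (the 1 << (field + 3) KB scaling mirrors what arch/arm/mm/cache-l2x0.c does; treat the snippet as illustrative):

	u32 aux, field, way_size_kb;

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	field = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK)
			>> L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
	way_size_kb = 1 << (field + 3);	/* e.g. field 2 -> 32KB ways */

With the 0x3 mask, a field value of 4 (128KB ways) would have decoded as 0.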
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index f389b2704d82..c190bc992f0e 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -50,6 +50,7 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DEBUG_ARCH_V6_1	2
 #define ARM_DEBUG_ARCH_V7_ECP14	3
 #define ARM_DEBUG_ARCH_V7_MM	4
+#define ARM_DEBUG_ARCH_V7_1	5
 
 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE	0
@@ -57,6 +58,7 @@ static inline void decode_ctrl_reg(u32 reg,
 /* Watchpoints */
 #define ARM_BREAKPOINT_LOAD	1
 #define ARM_BREAKPOINT_STORE	2
+#define ARM_FSR_ACCESS_MASK	(1 << 11)
 
 /* Privilege Levels */
 #define ARM_BREAKPOINT_PRIV	1
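ARM_FSR_ACCESS_MASK selects bit 11 of the fault status register, the WnR flag, which records whether the aborting access was a store. The v7.1 watchpoint handler later in this merge uses it exactly as sketched here (HW_BREAKPOINT_R/W are the generic perf constants):

	static int fsr_access_type(unsigned int fsr)
	{
		/* WnR set: the faulting access was a write */
		return (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W
						   : HW_BREAKPOINT_R;
	}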
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 67c70a31a1be..71d99b83cdb9 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -13,7 +13,12 @@
 #define __ARM_PMU_H__
 
 #include <linux/interrupt.h>
+#include <linux/perf_event.h>
 
+/*
+ * Types of PMUs that can be accessed directly and require mutual
+ * exclusion between profiling tools.
+ */
 enum arm_pmu_type {
 	ARM_PMU_DEVICE_CPU	= 0,
 	ARM_NUM_PMU_DEVICES,
@@ -37,21 +42,17 @@ struct arm_pmu_platdata {
  * reserve_pmu() - reserve the hardware performance counters
  *
  * Reserve the hardware performance counters in the system for exclusive use.
- * The platform_device for the system is returned on success, ERR_PTR()
- * encoded error on failure.
+ * Returns 0 on success or -EBUSY if the lock is already held.
  */
-extern struct platform_device *
-reserve_pmu(enum arm_pmu_type device);
+extern int
+reserve_pmu(enum arm_pmu_type type);
 
 /**
  * release_pmu() - Relinquish control of the performance counters
  *
  * Release the performance counters and allow someone else to use them.
- * Callers must have disabled the counters and released IRQs before calling
- * this. The platform_device returned from reserve_pmu() must be passed as
- * a cookie.
  */
-extern int
+extern void
 release_pmu(enum arm_pmu_type type);
 
 /**
@@ -62,30 +63,84 @@ release_pmu(enum arm_pmu_type type);
  * the actual hardware initialisation.
  */
 extern int
-init_pmu(enum arm_pmu_type device);
+init_pmu(enum arm_pmu_type type);
 
 #else /* CONFIG_CPU_HAS_PMU */
 
 #include <linux/err.h>
 
-static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type device)
-{
-	return ERR_PTR(-ENODEV);
-}
-
 static inline int
-release_pmu(struct platform_device *pdev)
+reserve_pmu(enum arm_pmu_type type)
 {
 	return -ENODEV;
 }
 
-static inline int
-init_pmu(enum arm_pmu_type device)
-{
-	return -ENODEV;
-}
+static inline void
+release_pmu(enum arm_pmu_type type) { }
 
 #endif /* CONFIG_CPU_HAS_PMU */
 
+#ifdef CONFIG_HW_PERF_EVENTS
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event **events;
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	unsigned long *used_mask;
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t pmu_lock;
+};
+
+struct arm_pmu {
+	struct pmu	pmu;
+	enum arm_perf_pmu_ids id;
+	enum arm_pmu_type type;
+	cpumask_t	active_irqs;
+	const char	*name;
+	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
+	void		(*enable)(struct hw_perf_event *evt, int idx);
+	void		(*disable)(struct hw_perf_event *evt, int idx);
+	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
+					 struct hw_perf_event *hwc);
+	int		(*set_event_filter)(struct hw_perf_event *evt,
+					    struct perf_event_attr *attr);
+	u32		(*read_counter)(int idx);
+	void		(*write_counter)(int idx, u32 val);
+	void		(*start)(void);
+	void		(*stop)(void);
+	void		(*reset)(void *);
+	int		(*map_event)(struct perf_event *event);
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	*(*get_hw_events)(void);
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
+u64 armpmu_event_update(struct perf_event *event,
+			struct hw_perf_event *hwc,
+			int idx, int overflow);
+
+int armpmu_event_set_period(struct perf_event *event,
+			    struct hw_perf_event *hwc,
+			    int idx);
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
 #endif /* __ARM_PMU_H__ */
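Since the generic struct pmu is embedded in struct arm_pmu, any callback that receives a struct pmu (or an event carrying one) can recover the ARM wrapper with the container_of() idiom behind to_arm_pmu(). A sketch of the pattern used throughout the rewritten perf_event.c (example_pmu_enable is a hypothetical name):

	static void example_pmu_enable(struct pmu *pmu)
	{
		struct arm_pmu *armpmu = to_arm_pmu(pmu);

		if (armpmu->start)
			armpmu->start();
	}

This is what allows one copy of the armpmu_* callbacks to serve several PMU instances instead of a single file-scope armpmu pointer.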
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index a927ca1f5566..5a46225f007e 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -45,7 +45,6 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
 
 /* Number of BRP/WRP registers on this CPU. */
 static int core_num_brps;
-static int core_num_reserved_brps;
 static int core_num_wrps;
 
 /* Debug architecture version. */
@@ -137,10 +136,11 @@ static u8 get_debug_arch(void)
 	u32 didr;
 
 	/* Do we implement the extended CPUID interface? */
-	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
-	    "CPUID feature registers not supported. "
-	    "Assuming v6 debug is present.\n"))
+	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+		pr_warning("CPUID feature registers not supported. "
+			   "Assuming v6 debug is present.\n");
 		return ARM_DEBUG_ARCH_V6;
+	}
 
 	ARM_DBG_READ(c0, 0, didr);
 	return (didr >> 16) & 0xf;
@@ -154,10 +154,21 @@ u8 arch_get_debug_arch(void)
 static int debug_arch_supported(void)
 {
 	u8 arch = get_debug_arch();
-	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+
+	/* We don't support the memory-mapped interface. */
+	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
+		arch >= ARM_DEBUG_ARCH_V7_1;
+}
+
+/* Determine number of WRP registers available. */
+static int get_num_wrp_resources(void)
+{
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 28) & 0xf) + 1;
 }
 
-/* Determine number of BRP register available. */
+/* Determine number of BRP registers available. */
 static int get_num_brp_resources(void)
 {
 	u32 didr;
@@ -176,9 +187,10 @@ static int core_has_mismatch_brps(void)
 static int get_num_wrps(void)
 {
 	/*
-	 * FIXME: When a watchpoint fires, the only way to work out which
-	 * watchpoint it was is by disassembling the faulting instruction
-	 * and working out the address of the memory access.
+	 * On debug architectures prior to 7.1, when a watchpoint fires, the
+	 * only way to work out which watchpoint it was is by disassembling
+	 * the faulting instruction and working out the address of the memory
+	 * access.
 	 *
 	 * Furthermore, we can only do this if the watchpoint was precise
 	 * since imprecise watchpoints prevent us from calculating register
@@ -192,36 +204,17 @@ static int get_num_wrps(void)
 	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
 	 * that it is set on some implementations].
 	 */
+	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
+		return 1;
 
-#if 0
-	int wrps;
-	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
-	wrps = ((didr >> 28) & 0xf) + 1;
-#endif
-	int wrps = 1;
-
-	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
-		wrps = get_num_brp_resources() - 1;
-
-	return wrps;
-}
-
-/* We reserve one breakpoint for each watchpoint. */
-static int get_num_reserved_brps(void)
-{
-	if (core_has_mismatch_brps())
-		return get_num_wrps();
-	return 0;
+	return get_num_wrp_resources();
 }
 
 /* Determine number of usable BRPs available. */
 static int get_num_brps(void)
 {
 	int brps = get_num_brp_resources();
-	if (core_has_mismatch_brps())
-		brps -= get_num_reserved_brps();
-	return brps;
+	return core_has_mismatch_brps() ? brps - 1 : brps;
 }
 
 /*
@@ -239,7 +232,7 @@ static int enable_monitor_mode(void)
 
 	/* Ensure that halting mode is disabled. */
 	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
 		      "halting debug mode enabled. Unable to access hardware resources.\n")) {
 		ret = -EPERM;
 		goto out;
 	}
@@ -255,6 +248,7 @@ static int enable_monitor_mode(void)
 		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	case ARM_DEBUG_ARCH_V7_ECP14:
+	case ARM_DEBUG_ARCH_V7_1:
 		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	default:
@@ -346,24 +340,10 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		val_base = ARM_BASE_BVR;
 		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
 		max_slots = core_num_brps;
-		if (info->step_ctrl.enabled) {
-			/* Override the breakpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		}
 	} else {
 		/* Watchpoint */
-		if (info->step_ctrl.enabled) {
-			/* Install into the reserved breakpoint region. */
-			ctrl_base = ARM_BASE_BCR + core_num_brps;
-			val_base = ARM_BASE_BVR + core_num_brps;
-			/* Override the watchpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		} else {
-			ctrl_base = ARM_BASE_WCR;
-			val_base = ARM_BASE_WVR;
-		}
+		ctrl_base = ARM_BASE_WCR;
+		val_base = ARM_BASE_WVR;
 		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
@@ -382,6 +362,17 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		goto out;
 	}
 
+	/* Override the breakpoint data with the step data. */
+	if (info->step_ctrl.enabled) {
+		addr = info->trigger & ~0x3;
+		ctrl = encode_ctrl_reg(info->step_ctrl);
+		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
+			i = 0;
+			ctrl_base = ARM_BASE_BCR + core_num_brps;
+			val_base = ARM_BASE_BVR + core_num_brps;
+		}
+	}
+
 	/* Setup the address register. */
 	write_wb_reg(val_base + i, addr);
 
@@ -405,10 +396,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
-		if (info->step_ctrl.enabled)
-			base = ARM_BASE_BCR + core_num_brps;
-		else
-			base = ARM_BASE_WCR;
+		base = ARM_BASE_WCR;
 		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
@@ -426,6 +414,13 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
 		return;
 
+	/* Ensure that we disable the mismatch breakpoint. */
+	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
+	    info->step_ctrl.enabled) {
+		i = 0;
+		base = ARM_BASE_BCR + core_num_brps;
+	}
+
 	/* Reset the control register. */
 	write_wb_reg(base + i, 0);
 }
@@ -632,10 +627,9 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * we can use the mismatch feature as a poor-man's hardware
 	 * single-step, but this only works for per-task breakpoints.
 	 */
-	if (WARN_ONCE(!bp->overflow_handler &&
-		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
-		 || !bp->hw.bp_target),
-			"overflow handler required but none found\n")) {
+	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
+	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
+		pr_warning("overflow handler required but none found\n");
 		ret = -EINVAL;
 	}
 out:
@@ -666,34 +660,62 @@ static void disable_single_step(struct perf_event *bp)
 	arch_install_hw_breakpoint(bp);
 }
 
-static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
+static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+			       struct pt_regs *regs)
 {
-	int i;
+	int i, access;
+	u32 val, ctrl_reg, alignment_mask;
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;
+	struct arch_hw_breakpoint_ctrl ctrl;
 
 	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
-	/* Without a disassembler, we can only handle 1 watchpoint. */
-	BUG_ON(core_num_wrps > 1);
-
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
 
 		wp = slots[i];
 
-		if (wp == NULL) {
-			rcu_read_unlock();
-			continue;
-		}
+		if (wp == NULL)
+			goto unlock;
 
+		info = counter_arch_bp(wp);
 		/*
-		 * The DFAR is an unknown value. Since we only allow a
-		 * single watchpoint, we can set the trigger to the lowest
-		 * possible faulting address.
+		 * The DFAR is an unknown value on debug architectures prior
+		 * to 7.1. Since we only allow a single watchpoint on these
+		 * older CPUs, we can set the trigger to the lowest possible
+		 * faulting address.
 		 */
-		info = counter_arch_bp(wp);
-		info->trigger = wp->attr.bp_addr;
+		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+			BUG_ON(i > 0);
+			info->trigger = wp->attr.bp_addr;
+		} else {
+			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+				alignment_mask = 0x7;
+			else
+				alignment_mask = 0x3;
+
+			/* Check if the watchpoint value matches. */
+			val = read_wb_reg(ARM_BASE_WVR + i);
+			if (val != (addr & ~alignment_mask))
+				goto unlock;
+
+			/* Possible match, check the byte address select. */
+			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+			decode_ctrl_reg(ctrl_reg, &ctrl);
+			if (!((1 << (addr & alignment_mask)) & ctrl.len))
+				goto unlock;
+
+			/* Check that the access type matches. */
+			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+				 HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+
+			/* We have a winner. */
+			info->trigger = addr;
+		}
+
 		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
 		perf_bp_event(wp, regs);
 
@@ -705,6 +727,7 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
 		if (!wp->overflow_handler)
 			enable_single_step(wp, instruction_pointer(regs));
 
+unlock:
 		rcu_read_unlock();
 	}
 }
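On v7.1 debug the handler can attribute the fault precisely: the DFAR is compared against the aligned watchpoint value register, then against the byte-address-select lanes decoded from the control register. A worked example with hypothetical values:

	/* 1-byte watchpoint on 0x8002: WVR = 0x8000, BAS lane 2 set */
	unsigned long wvr  = 0x8000;	/* word-aligned value register    */
	unsigned int  bas  = 1 << 2;	/* byte address select, lane 2    */
	unsigned long addr = 0x8002;	/* faulting address from the DFAR */

	int value_match = (addr & ~0x3UL) == wvr;	  /* true */
	int lane_match  = !!(bas & (1 << (addr & 0x3)));  /* true */
	/* both hold, so this slot raised the watchpoint */

An access to 0x8001 would pass the value check but miss the lane check, so the loop falls through to the next slot.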
@@ -717,7 +740,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 
 	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
-	for (i = 0; i < core_num_reserved_brps; ++i) {
+	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
 
 		wp = slots[i];
@@ -820,7 +843,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 	case ARM_ENTRY_ASYNC_WATCHPOINT:
 		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
 	case ARM_ENTRY_SYNC_WATCHPOINT:
-		watchpoint_handler(addr, regs);
+		watchpoint_handler(addr, fsr, regs);
 		break;
 	default:
 		ret = 1; /* Unhandled fault. */
@@ -834,11 +857,31 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *info)
+static cpumask_t debug_err_mask;
+
+static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
 {
-	int i, cpu = smp_processor_id();
+	int cpu = smp_processor_id();
+
+	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
+		   instr, cpu);
+
+	/* Set the error flag for this CPU and skip the faulting instruction. */
+	cpumask_set_cpu(cpu, &debug_err_mask);
+	instruction_pointer(regs) += 4;
+	return 0;
+}
+
+static struct undef_hook debug_reg_hook = {
+	.instr_mask	= 0x0fe80f10,
+	.instr_val	= 0x0e000e10,
+	.fn		= debug_reg_trap,
+};
+
+static void reset_ctrl_regs(void *unused)
+{
+	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
 	u32 dbg_power;
-	cpumask_t *cpumask = info;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
@@ -848,38 +891,52 @@ static void reset_ctrl_regs(void *info)
 	 * Access Register to avoid taking undefined instruction exceptions
 	 * later on.
 	 */
-	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+	switch (debug_arch) {
+	case ARM_DEBUG_ARCH_V7_ECP14:
 		/*
 		 * Ensure sticky power-down is clear (i.e. debug logic is
 		 * powered up).
 		 */
 		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
-		if ((dbg_power & 0x1) == 0) {
-			pr_warning("CPU %d debug is powered down!\n", cpu);
-			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
-			return;
-		}
-
+		if ((dbg_power & 0x1) == 0)
+			err = -EPERM;
+		break;
+	case ARM_DEBUG_ARCH_V7_1:
 		/*
-		 * Unconditionally clear the lock by writing a value
-		 * other than 0xC5ACCE55 to the access register.
+		 * Ensure the OS double lock is clear.
 		 */
-		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
-		isb();
+		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 1)
+			err = -EPERM;
+		break;
+	}
 
-		/*
-		 * Clear any configured vector-catch events before
-		 * enabling monitor mode.
-		 */
-		asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
-		isb();
+	if (err) {
+		pr_warning("CPU %d debug is powered down!\n", cpu);
+		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
+		return;
 	}
 
+	/*
+	 * Unconditionally clear the lock by writing a value
+	 * other than 0xC5ACCE55 to the access register.
+	 */
+	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+	isb();
+
+	/*
+	 * Clear any configured vector-catch events before
+	 * enabling monitor mode.
+	 */
+	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+	isb();
+
 	if (enable_monitor_mode())
 		return;
 
 	/* We must also reset any reserved registers. */
-	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
+	raw_num_brps = get_num_brp_resources();
+	for (i = 0; i < raw_num_brps; ++i) {
 		write_wb_reg(ARM_BASE_BCR + i, 0UL);
 		write_wb_reg(ARM_BASE_BVR + i, 0UL);
 	}
@@ -895,6 +952,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
 {
 	if (action == CPU_ONLINE)
 		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+
 	return NOTIFY_OK;
 }
 
@@ -905,7 +963,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
-	cpumask_t cpumask = { CPU_BITS_NONE };
 
 	debug_arch = get_debug_arch();
 
@@ -916,28 +973,31 @@ static int __init arch_hw_breakpoint_init(void)
 
 	/* Determine how many BRPs/WRPs are available. */
 	core_num_brps = get_num_brps();
-	core_num_reserved_brps = get_num_reserved_brps();
 	core_num_wrps = get_num_wrps();
 
-	pr_info("found %d breakpoint and %d watchpoint registers.\n",
-		core_num_brps + core_num_reserved_brps, core_num_wrps);
-
-	if (core_num_reserved_brps)
-		pr_info("%d breakpoint(s) reserved for watchpoint "
-				"single-step.\n", core_num_reserved_brps);
+	/*
+	 * We need to tread carefully here because DBGSWENABLE may be
+	 * driven low on this core and there isn't an architected way to
+	 * determine that.
+	 */
+	register_undef_hook(&debug_reg_hook);
 
 	/*
 	 * Reset the breakpoint resources. We assume that a halting
 	 * debugger will leave the world in a nice state for us.
 	 */
-	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
-	if (!cpumask_empty(&cpumask)) {
+	on_each_cpu(reset_ctrl_regs, NULL, 1);
+	unregister_undef_hook(&debug_reg_hook);
+	if (!cpumask_empty(&debug_err_mask)) {
 		core_num_brps = 0;
-		core_num_reserved_brps = 0;
 		core_num_wrps = 0;
 		return 0;
 	}
 
+	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
+		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
+		"", core_num_wrps);
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
 		max_watchpoint_len = 4;
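The undef hook registered around the reset is the safety net for cores whose DBGSWENABLE signal is tied low: a CP14 debug-register access that faults gets flagged in debug_err_mask and skipped instead of killing the boot. The instr_mask/instr_val pair matches mrc/mcr transfers to coprocessor 14; the same test as a standalone predicate (illustrative only):

	static int is_cp14_debug_access(u32 instr)
	{
		/* mrc/mcr p14, 0, Rt, CRn, CRm, op2 encodings */
		return (instr & 0x0fe80f10) == 0x0e000e10;
	}

If the mask is non-empty after the on_each_cpu() pass, arch_hw_breakpoint_init() reports zero usable breakpoint and watchpoint slots and bails out.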
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 53c9c2610cbc..e6e5d7c84f1a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,6 +12,7 @@
  */
 #define pr_fmt(fmt) "hw perfevents: " fmt
 
+#include <linux/bitmap.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -26,16 +27,8 @@
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>
 
-static struct platform_device *pmu_device;
-
-/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
-static DEFINE_RAW_SPINLOCK(pmu_lock);
-
 /*
- * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
+ * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
  * another platform that supports more, we need to increase this to be the
  * largest of all platforms.
  *
@@ -43,62 +36,24 @@ static DEFINE_RAW_SPINLOCK(pmu_lock);
  * cycle counter CCNT + 31 events counters CNT0..30.
  * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
  */
-#define ARMPMU_MAX_HWEVENTS		33
+#define ARMPMU_MAX_HWEVENTS		32
 
-/* The events for a given CPU. */
-struct cpu_hw_events {
-	/*
-	 * The events that are active on the CPU for the given index. Index 0
-	 * is reserved.
-	 */
-	struct perf_event *events[ARMPMU_MAX_HWEVENTS];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is being used for
-	 * an event. A 0 means that the counter can be used.
-	 */
-	unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is actively being
-	 * used.
-	 */
-	unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-};
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-struct arm_pmu {
-	enum arm_perf_pmu_ids id;
-	const char *name;
-	irqreturn_t (*handle_irq)(int irq_num, void *dev);
-	void (*enable)(struct hw_perf_event *evt, int idx);
-	void (*disable)(struct hw_perf_event *evt, int idx);
-	int (*get_event_idx)(struct cpu_hw_events *cpuc,
-			     struct hw_perf_event *hwc);
-	u32 (*read_counter)(int idx);
-	void (*write_counter)(int idx, u32 val);
-	void (*start)(void);
-	void (*stop)(void);
-	void (*reset)(void *);
-	const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-				   [PERF_COUNT_HW_CACHE_OP_MAX]
-				   [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	const unsigned (*event_map)[PERF_COUNT_HW_MAX];
-	u32 raw_event_mask;
-	int num_events;
-	u64 max_period;
-};
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
 
 /* Set at runtime when we know what CPU type we are. */
-static const struct arm_pmu *armpmu;
+static struct arm_pmu *cpu_pmu;
 
 enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void)
 {
 	int id = -ENODEV;
 
-	if (armpmu != NULL)
-		id = armpmu->id;
+	if (cpu_pmu != NULL)
+		id = cpu_pmu->id;
 
 	return id;
 }
@@ -109,8 +64,8 @@ armpmu_get_max_events(void)
 {
 	int max_events = 0;
 
-	if (armpmu != NULL)
-		max_events = armpmu->num_events;
+	if (cpu_pmu != NULL)
+		max_events = cpu_pmu->num_events;
 
 	return max_events;
 }
@@ -130,7 +85,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
 static int
-armpmu_map_cache_event(u64 config)
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
 {
 	unsigned int cache_type, cache_op, cache_result, ret;
 
@@ -146,7 +105,7 @@ armpmu_map_cache_event(u64 config)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
 
-	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
 
 	if (ret == CACHE_OP_UNSUPPORTED)
 		return -ENOENT;
@@ -155,23 +114,46 @@ armpmu_map_cache_event(u64 config)
 }
 
 static int
-armpmu_map_event(u64 config)
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*armpmu->event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+	int mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
 static int
-armpmu_map_raw_event(u64 config)
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 {
-	return (int)(config & armpmu->raw_event_mask);
+	return (int)(config & raw_event_mask);
 }
 
-static int
+static int map_cpu_event(struct perf_event *event,
+			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+			 const unsigned (*cache_map)
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX],
+			 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+int
 armpmu_event_set_period(struct perf_event *event,
 			struct hw_perf_event *hwc,
 			int idx)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0;
@@ -202,11 +184,12 @@ armpmu_event_set_period(struct perf_event *event,
 	return ret;
 }
 
-static u64
+u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
 		    int idx, int overflow)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
 
 again:
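The period machinery around these two hunks works in two's complement: armpmu_event_set_period() programs the counter with the negated remaining period so the overflow interrupt arrives after exactly that many events, and armpmu_event_update() masks the difference with max_period so the running count survives the 32-bit wrap. A worked example with hypothetical numbers:

	s64 left = 1000;		/* remaining sample period  */
	u32 programmed = (u32)-left;	/* 0xfffffc18 written to HW */
	/* 1000 events later the counter wraps to 0 and the IRQ fires;
	 * the update step then accumulates
	 *	delta = (new_raw - prev_raw) & max_period;	== 1000
	 * into event->count.
	 */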
@@ -246,11 +229,9 @@ armpmu_read(struct perf_event *event)
 static void
 armpmu_stop(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (!armpmu)
-		return;
-
 	/*
 	 * ARM pmu always has to update the counter, so ignore
 	 * PERF_EF_UPDATE, see comments in armpmu_start().
@@ -266,11 +247,9 @@ armpmu_stop(struct perf_event *event, int flags)
 static void
 armpmu_start(struct perf_event *event, int flags)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (!armpmu)
-		return;
-
 	/*
 	 * ARM pmu always has to reprogram the period, so ignore
 	 * PERF_EF_RELOAD, see the comment below.
@@ -293,16 +272,16 @@ armpmu_start(struct perf_event *event, int flags)
 static void
 armpmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
 	WARN_ON(idx < 0);
 
-	clear_bit(idx, cpuc->active_mask);
 	armpmu_stop(event, PERF_EF_UPDATE);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
 
 	perf_event_update_userpage(event);
 }
@@ -310,7 +289,8 @@ armpmu_del(struct perf_event *event, int flags)
 static int
 armpmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	struct hw_perf_event *hwc = &event->hw;
 	int idx;
 	int err = 0;
@@ -318,7 +298,7 @@ armpmu_add(struct perf_event *event, int flags)
 	perf_pmu_disable(event->pmu);
 
 	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(cpuc, hwc);
+	idx = armpmu->get_event_idx(hw_events, hwc);
 	if (idx < 0) {
 		err = idx;
 		goto out;
@@ -330,8 +310,7 @@ armpmu_add(struct perf_event *event, int flags)
 	 */
 	event->hw.idx = idx;
 	armpmu->disable(hwc, idx);
-	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
+	hw_events->events[idx] = event;
 
 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	if (flags & PERF_EF_START)
@@ -345,25 +324,25 @@ out:
 	return err;
 }
 
-static struct pmu pmu;
-
 static int
-validate_event(struct cpu_hw_events *cpuc,
+validate_event(struct pmu_hw_events *hw_events,
 	       struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event fake_event = event->hw;
+	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
 		return 1;
 
-	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
 }
 
 static int
 validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
-	struct cpu_hw_events fake_pmu;
+	struct pmu_hw_events fake_pmu;
 
 	memset(&fake_pmu, 0, sizeof(fake_pmu));
 
@@ -383,110 +362,119 @@ validate_group(struct perf_event *event)
 
 static irqreturn_t armpmu_platform_irq(int irq, void *dev)
 {
-	struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);
+	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+	struct platform_device *plat_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
 
 	return plat->handle_irq(irq, dev, armpmu->handle_irq);
 }
 
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, armpmu);
+	}
+
+	release_pmu(armpmu->type);
+}
+
 static int
-armpmu_reserve_hardware(void)
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
 {
 	struct arm_pmu_platdata *plat;
 	irq_handler_t handle_irq;
-	int i, err = -ENODEV, irq;
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = armpmu->plat_device;
 
-	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
-	if (IS_ERR(pmu_device)) {
+	err = reserve_pmu(armpmu->type);
+	if (err) {
 		pr_warning("unable to reserve pmu\n");
-		return PTR_ERR(pmu_device);
+		return err;
 	}
 
-	init_pmu(ARM_PMU_DEVICE_CPU);
-
 	plat = dev_get_platdata(&pmu_device->dev);
 	if (plat && plat->handle_irq)
 		handle_irq = armpmu_platform_irq;
 	else
 		handle_irq = armpmu->handle_irq;
 
-	if (pmu_device->num_resources < 1) {
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
 		pr_err("no irqs for PMUs defined\n");
 		return -ENODEV;
 	}
 
-	for (i = 0; i < pmu_device->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
+		err = 0;
 		irq = platform_get_irq(pmu_device, i);
 		if (irq < 0)
 			continue;
 
+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue. Otherwise, continue without this interrupt.
+		 */
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				   irq, i);
+			continue;
+		}
+
 		err = request_irq(irq, handle_irq,
 				  IRQF_DISABLED | IRQF_NOBALANCING,
-				  "armpmu", NULL);
+				  "arm-pmu", armpmu);
 		if (err) {
-			pr_warning("unable to request IRQ%d for ARM perf "
-				   "counters\n", irq);
-			break;
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+			       irq);
+			armpmu_release_hardware(armpmu);
+			return err;
 		}
-	}
 
-	if (err) {
-		for (i = i - 1; i >= 0; --i) {
-			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
-				free_irq(irq, NULL);
-		}
-		release_pmu(ARM_PMU_DEVICE_CPU);
-		pmu_device = NULL;
+		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}
 
-	return err;
+	return 0;
 }
 
 static void
-armpmu_release_hardware(void)
+hw_perf_event_destroy(struct perf_event *event)
 {
-	int i, irq;
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	atomic_t *active_events = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
 
-	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, NULL);
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+		armpmu_release_hardware(armpmu);
+		mutex_unlock(pmu_reserve_mutex);
 	}
-	armpmu->stop();
-
-	release_pmu(ARM_PMU_DEVICE_CPU);
-	pmu_device = NULL;
 }
 
-static atomic_t active_events = ATOMIC_INIT(0);
-static DEFINE_MUTEX(pmu_reserve_mutex);
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
 {
-	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
-		armpmu_release_hardware();
-		mutex_unlock(&pmu_reserve_mutex);
-	}
+	return attr->exclude_idle || attr->exclude_user ||
+	       attr->exclude_kernel || attr->exclude_hv;
 }
 
 static int
 __hw_perf_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping, err;
 
-	/* Decode the generic type into an ARM event identifier. */
-	if (PERF_TYPE_HARDWARE == event->attr.type) {
-		mapping = armpmu_map_event(event->attr.config);
-	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
-		mapping = armpmu_map_cache_event(event->attr.config);
-	} else if (PERF_TYPE_RAW == event->attr.type) {
-		mapping = armpmu_map_raw_event(event->attr.config);
-	} else {
-		pr_debug("event type %x not supported\n", event->attr.type);
-		return -EOPNOTSUPP;
-	}
+	mapping = armpmu->map_event(event);
 
 	if (mapping < 0) {
 		pr_debug("event %x:%llx not supported\n", event->attr.type,
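With the lookup tables passed as parameters, each backend's map_event callback collapses to a one-line wrapper around map_cpu_event(). A sketch of the expected shape (the table names are stand-ins for the real ones in perf_event_v6.c):

	static int armv6_map_event(struct perf_event *event)
	{
		return map_cpu_event(event, &armv6_perf_map,
				     &armv6_perf_cache_map, 0xFF);
	}

__hw_perf_event_init() then only calls armpmu->map_event(event) and no longer needs a global armpmu pointer or a per-type if/else chain.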
@@ -495,34 +483,31 @@ __hw_perf_event_init(struct perf_event *event)
 	}
 
 	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has its own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= 0;
+	hwc->config		= 0;
+	hwc->event_base		= 0;
+
+	/*
 	 * Check whether we need to exclude the counter from certain modes.
-	 * The ARM performance counters are on all of the time so if someone
-	 * has asked us for some excludes then we have to fail.
 	 */
-	if (event->attr.exclude_kernel || event->attr.exclude_user ||
-	    event->attr.exclude_hv || event->attr.exclude_idle) {
+	if ((!armpmu->set_event_filter ||
+	     armpmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
 		pr_debug("ARM performance counters do not support "
 			 "mode exclusion\n");
 		return -EPERM;
 	}
 
 	/*
-	 * We don't assign an index until we actually place the event onto
-	 * hardware. Use -1 to signify that we haven't decided where to put it
-	 * yet. For SMP systems, each core has its own PMU so we can't do any
-	 * clever allocation or constraints checking at this point.
+	 * Store the event encoding into the config_base field.
 	 */
-	hwc->idx = -1;
-
-	/*
-	 * Store the event encoding into the config_base field. config and
-	 * event_base are unused as the only 2 things we need to know are
-	 * the event mapping and the counter to use. The counter to use is
-	 * also the indx and the config_base is the event type.
-	 */
-	hwc->config_base = (unsigned long)mapping;
-	hwc->config = 0;
-	hwc->event_base = 0;
+	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
 		hwc->sample_period  = armpmu->max_period;
@@ -542,32 +527,23 @@ __hw_perf_event_init(struct perf_event *event)
 
 static int armpmu_event_init(struct perf_event *event)
 {
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;
 
-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-
-	default:
+	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
-	}
-
-	if (!armpmu)
-		return -ENODEV;
 
 	event->destroy = hw_perf_event_destroy;
 
-	if (!atomic_inc_not_zero(&active_events)) {
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0) {
-			err = armpmu_reserve_hardware();
-		}
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = armpmu_reserve_hardware(armpmu);
 
 		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
 	}
 
 	if (err)
@@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event)
 
 static void armpmu_enable(struct pmu *pmu)
 {
-	/* Enable all of the perf events on hardware. */
-	int idx, enabled = 0;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	if (!armpmu)
-		return;
-
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
-		struct perf_event *event = cpuc->events[idx];
-
-		if (!event)
-			continue;
-
-		armpmu->enable(&event->hw, idx);
-		enabled = 1;
-	}
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
 	if (enabled)
 		armpmu->start();
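bitmap_weight() replaces the hand-rolled scan over the events array: the PMU is started only when at least one counter bit is set in used_mask. For example (hypothetical mask):

	unsigned long mask[1] = { 0x0b };	/* counters 0, 1 and 3 busy   */
	int enabled = bitmap_weight(mask, 32);	/* 3, so armpmu->start() runs */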
@@ -605,20 +568,32 @@ static void armpmu_enable(struct pmu *pmu)
 
 static void armpmu_disable(struct pmu *pmu)
 {
-	if (armpmu)
-		armpmu->stop();
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	armpmu->stop();
 }
 
-static struct pmu pmu = {
-	.pmu_enable	= armpmu_enable,
-	.pmu_disable	= armpmu_disable,
-	.event_init	= armpmu_event_init,
-	.add		= armpmu_add,
-	.del		= armpmu_del,
-	.start		= armpmu_start,
-	.stop		= armpmu_stop,
-	.read		= armpmu_read,
-};
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+	};
+}
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+	armpmu_init(armpmu);
+	return perf_pmu_register(&armpmu->pmu, name, type);
+}
 
 /* Include the PMU-specific implementations. */
 #include "perf_event_xscale.c"
@@ -630,14 +605,72 @@ static struct pmu pmu = {
630 * This requires SMP to be available, so exists as a separate initcall. 605 * This requires SMP to be available, so exists as a separate initcall.
631 */ 606 */
632static int __init 607static int __init
633armpmu_reset(void) 608cpu_pmu_reset(void)
609{
610 if (cpu_pmu && cpu_pmu->reset)
611 return on_each_cpu(cpu_pmu->reset, NULL, 1);
612 return 0;
613}
614arch_initcall(cpu_pmu_reset);
615
616/*
617 * PMU platform driver and devicetree bindings.
618 */
619static struct of_device_id armpmu_of_device_ids[] = {
620 {.compatible = "arm,cortex-a9-pmu"},
621 {.compatible = "arm,cortex-a8-pmu"},
622 {.compatible = "arm,arm1136-pmu"},
623 {.compatible = "arm,arm1176-pmu"},
624 {},
625};
626
627static struct platform_device_id armpmu_plat_device_ids[] = {
628 {.name = "arm-pmu"},
629 {},
630};
631
632static int __devinit armpmu_device_probe(struct platform_device *pdev)
634{ 633{
635 if (armpmu && armpmu->reset) 634 cpu_pmu->plat_device = pdev;
636 return on_each_cpu(armpmu->reset, NULL, 1);
637 return 0; 635 return 0;
638} 636}
639arch_initcall(armpmu_reset);
640 637
638static struct platform_driver armpmu_driver = {
639 .driver = {
640 .name = "arm-pmu",
641 .of_match_table = armpmu_of_device_ids,
642 },
643 .probe = armpmu_device_probe,
644 .id_table = armpmu_plat_device_ids,
645};
646
647static int __init register_pmu_driver(void)
648{
649 return platform_driver_register(&armpmu_driver);
650}
651device_initcall(register_pmu_driver);
652
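For platforms without devicetree, the probe above still matches the old-style "arm-pmu" platform device via the id_table. A sketch of what a board file would register; the IRQ number here is made up for illustration:

static struct resource pmu_resource = {
	.start = 100,		/* hypothetical PMU IRQ */
	.end   = 100,
	.flags = IORESOURCE_IRQ,
};

static struct platform_device pmu_device = {
	.name          = "arm-pmu",
	.id            = -1,
	.num_resources = 1,
	.resource      = &pmu_resource,
};

static int __init board_add_pmu(void)
{
	return platform_device_register(&pmu_device);
}
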
653static struct pmu_hw_events *armpmu_get_cpu_events(void)
654{
655 return &__get_cpu_var(cpu_hw_events);
656}
657
658static void __init cpu_pmu_init(struct arm_pmu *armpmu)
659{
660 int cpu;
661 for_each_possible_cpu(cpu) {
662 struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
663 events->events = per_cpu(hw_events, cpu);
664 events->used_mask = per_cpu(used_mask, cpu);
665 raw_spin_lock_init(&events->pmu_lock);
666 }
667 armpmu->get_hw_events = armpmu_get_cpu_events;
668 armpmu->type = ARM_PMU_DEVICE_CPU;
669}
670
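cpu_pmu_init() stitches its per-CPU state together from per-CPU variables defined earlier in the file but not shown in this hunk. A hedged sketch of their likely shape; the bound is named illustratively here (the real code is believed to size these by ARMPMU_MAX_HWEVENTS):

#define MAX_HWEVENTS 32	/* illustrative stand-in for ARMPMU_MAX_HWEVENTS */

static DEFINE_PER_CPU(struct perf_event * [MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
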
671/*
672 * CPU PMU identification and registration.
673 */
641static int __init 674static int __init
642init_hw_perf_events(void) 675init_hw_perf_events(void)
643{ 676{
@@ -651,22 +684,22 @@ init_hw_perf_events(void)
651 case 0xB360: /* ARM1136 */ 684 case 0xB360: /* ARM1136 */
652 case 0xB560: /* ARM1156 */ 685 case 0xB560: /* ARM1156 */
653 case 0xB760: /* ARM1176 */ 686 case 0xB760: /* ARM1176 */
654 armpmu = armv6pmu_init(); 687 cpu_pmu = armv6pmu_init();
655 break; 688 break;
656 case 0xB020: /* ARM11mpcore */ 689 case 0xB020: /* ARM11mpcore */
657 armpmu = armv6mpcore_pmu_init(); 690 cpu_pmu = armv6mpcore_pmu_init();
658 break; 691 break;
659 case 0xC080: /* Cortex-A8 */ 692 case 0xC080: /* Cortex-A8 */
660 armpmu = armv7_a8_pmu_init(); 693 cpu_pmu = armv7_a8_pmu_init();
661 break; 694 break;
662 case 0xC090: /* Cortex-A9 */ 695 case 0xC090: /* Cortex-A9 */
663 armpmu = armv7_a9_pmu_init(); 696 cpu_pmu = armv7_a9_pmu_init();
664 break; 697 break;
665 case 0xC050: /* Cortex-A5 */ 698 case 0xC050: /* Cortex-A5 */
666 armpmu = armv7_a5_pmu_init(); 699 cpu_pmu = armv7_a5_pmu_init();
667 break; 700 break;
668 case 0xC0F0: /* Cortex-A15 */ 701 case 0xC0F0: /* Cortex-A15 */
669 armpmu = armv7_a15_pmu_init(); 702 cpu_pmu = armv7_a15_pmu_init();
670 break; 703 break;
671 } 704 }
672 /* Intel CPUs [xscale]. */ 705 /* Intel CPUs [xscale]. */
@@ -674,23 +707,23 @@ init_hw_perf_events(void)
674 part_number = (cpuid >> 13) & 0x7; 707 part_number = (cpuid >> 13) & 0x7;
675 switch (part_number) { 708 switch (part_number) {
676 case 1: 709 case 1:
677 armpmu = xscale1pmu_init(); 710 cpu_pmu = xscale1pmu_init();
678 break; 711 break;
679 case 2: 712 case 2:
680 armpmu = xscale2pmu_init(); 713 cpu_pmu = xscale2pmu_init();
681 break; 714 break;
682 } 715 }
683 } 716 }
684 717
685 if (armpmu) { 718 if (cpu_pmu) {
686 pr_info("enabled with %s PMU driver, %d counters available\n", 719 pr_info("enabled with %s PMU driver, %d counters available\n",
687 armpmu->name, armpmu->num_events); 720 cpu_pmu->name, cpu_pmu->num_events);
721 cpu_pmu_init(cpu_pmu);
722 armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
688 } else { 723 } else {
689 pr_info("no hardware support available\n"); 724 pr_info("no hardware support available\n");
690 } 725 }
691 726
692 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
693
694 return 0; 727 return 0;
695} 728}
696early_initcall(init_hw_perf_events); 729early_initcall(init_hw_perf_events);
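For reference, the implementor/part values driving the switch above come straight out of the MIDR. A hedged sketch of the decode; the 0xff000000 and 0xfff0 masks follow the ARM ARM's MIDR layout and are not themselves visible in this hunk:

static void decode_midr(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xff000000) >> 24;
	unsigned long part_number = (cpuid & 0xfff0);

	if (implementor == 0x41) {
		/* ARM Ltd: 0xC090 == Cortex-A9, 0xB760 == ARM1176, ... */
	} else if (implementor == 0x69) {
		/* Intel: bits [15:13] distinguish XScale v1/v2 */
		part_number = (cpuid >> 13) & 0x7;
	}
	(void)part_number;
}
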
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index dd7f3b9f4cb3..e63d8115c01b 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -54,7 +54,7 @@ enum armv6_perf_types {
54}; 54};
55 55
56enum armv6_counters { 56enum armv6_counters {
57 ARMV6_CYCLE_COUNTER = 1, 57 ARMV6_CYCLE_COUNTER = 0,
58 ARMV6_COUNTER0, 58 ARMV6_COUNTER0,
59 ARMV6_COUNTER1, 59 ARMV6_COUNTER1,
60}; 60};
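Re-basing ARMV6_CYCLE_COUNTER from 1 to 0 (mirrored below by the new ARMV7_IDX_* macros and the xscale enum) is what lets every per-counter loop in this series drop the old inclusive upper bound and take the idiomatic half-open form. A sketch of the resulting shape:

/* old, 1-based: for (idx = 0; idx <= armpmu->num_events; ++idx) */
static void visit_counters(struct arm_pmu *pmu, void (*fn)(int idx))
{
	int idx;

	for (idx = 0; idx < pmu->num_events; ++idx)
		fn(idx);
}
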
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
433 int idx) 433 int idx)
434{ 434{
435 unsigned long val, mask, evt, flags; 435 unsigned long val, mask, evt, flags;
436 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
436 437
437 if (ARMV6_CYCLE_COUNTER == idx) { 438 if (ARMV6_CYCLE_COUNTER == idx) {
438 mask = 0; 439 mask = 0;
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
454 * Mask out the current event and set the counter to count the event 455 * Mask out the current event and set the counter to count the event
455 * that we're interested in. 456 * that we're interested in.
456 */ 457 */
457 raw_spin_lock_irqsave(&pmu_lock, flags); 458 raw_spin_lock_irqsave(&events->pmu_lock, flags);
458 val = armv6_pmcr_read(); 459 val = armv6_pmcr_read();
459 val &= ~mask; 460 val &= ~mask;
460 val |= evt; 461 val |= evt;
461 armv6_pmcr_write(val); 462 armv6_pmcr_write(val);
462 raw_spin_unlock_irqrestore(&pmu_lock, flags); 463 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
464}
465
466static int counter_is_active(unsigned long pmcr, int idx)
467{
468 unsigned long mask = 0;
469 if (idx == ARMV6_CYCLE_COUNTER)
470 mask = ARMV6_PMCR_CCOUNT_IEN;
471 else if (idx == ARMV6_COUNTER0)
472 mask = ARMV6_PMCR_COUNT0_IEN;
473 else if (idx == ARMV6_COUNTER1)
474 mask = ARMV6_PMCR_COUNT1_IEN;
475
476 if (mask)
477 return pmcr & mask;
478
479 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
480 return 0;
463} 481}
464 482
465static irqreturn_t 483static irqreturn_t
@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
468{ 486{
469 unsigned long pmcr = armv6_pmcr_read(); 487 unsigned long pmcr = armv6_pmcr_read();
470 struct perf_sample_data data; 488 struct perf_sample_data data;
471 struct cpu_hw_events *cpuc; 489 struct pmu_hw_events *cpuc;
472 struct pt_regs *regs; 490 struct pt_regs *regs;
473 int idx; 491 int idx;
474 492
@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
487 perf_sample_data_init(&data, 0); 505 perf_sample_data_init(&data, 0);
488 506
489 cpuc = &__get_cpu_var(cpu_hw_events); 507 cpuc = &__get_cpu_var(cpu_hw_events);
490 for (idx = 0; idx <= armpmu->num_events; ++idx) { 508 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
491 struct perf_event *event = cpuc->events[idx]; 509 struct perf_event *event = cpuc->events[idx];
492 struct hw_perf_event *hwc; 510 struct hw_perf_event *hwc;
493 511
494 if (!test_bit(idx, cpuc->active_mask)) 512 if (!counter_is_active(pmcr, idx))
495 continue; 513 continue;
496 514
497 /* 515 /*
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
508 continue; 526 continue;
509 527
510 if (perf_event_overflow(event, &data, regs)) 528 if (perf_event_overflow(event, &data, regs))
511 armpmu->disable(hwc, idx); 529 cpu_pmu->disable(hwc, idx);
512 } 530 }
513 531
514 /* 532 /*
@@ -527,28 +545,30 @@ static void
527armv6pmu_start(void) 545armv6pmu_start(void)
528{ 546{
529 unsigned long flags, val; 547 unsigned long flags, val;
548 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
530 549
531 raw_spin_lock_irqsave(&pmu_lock, flags); 550 raw_spin_lock_irqsave(&events->pmu_lock, flags);
532 val = armv6_pmcr_read(); 551 val = armv6_pmcr_read();
533 val |= ARMV6_PMCR_ENABLE; 552 val |= ARMV6_PMCR_ENABLE;
534 armv6_pmcr_write(val); 553 armv6_pmcr_write(val);
535 raw_spin_unlock_irqrestore(&pmu_lock, flags); 554 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
536} 555}
537 556
538static void 557static void
539armv6pmu_stop(void) 558armv6pmu_stop(void)
540{ 559{
541 unsigned long flags, val; 560 unsigned long flags, val;
561 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
542 562
543 raw_spin_lock_irqsave(&pmu_lock, flags); 563 raw_spin_lock_irqsave(&events->pmu_lock, flags);
544 val = armv6_pmcr_read(); 564 val = armv6_pmcr_read();
545 val &= ~ARMV6_PMCR_ENABLE; 565 val &= ~ARMV6_PMCR_ENABLE;
546 armv6_pmcr_write(val); 566 armv6_pmcr_write(val);
547 raw_spin_unlock_irqrestore(&pmu_lock, flags); 567 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
548} 568}
549 569
550static int 570static int
551armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, 571armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
552 struct hw_perf_event *event) 572 struct hw_perf_event *event)
553{ 573{
554 /* Always place a cycle counter into the cycle counter. */ 574 /* Always place a cycle counter into the cycle counter. */
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
578 int idx) 598 int idx)
579{ 599{
580 unsigned long val, mask, evt, flags; 600 unsigned long val, mask, evt, flags;
601 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
581 602
582 if (ARMV6_CYCLE_COUNTER == idx) { 603 if (ARMV6_CYCLE_COUNTER == idx) {
583 mask = ARMV6_PMCR_CCOUNT_IEN; 604 mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
598 * of ETM bus signal assertion cycles. The external reporting should 619 * of ETM bus signal assertion cycles. The external reporting should
599 * be disabled and so this should never increment. 620 * be disabled and so this should never increment.
600 */ 621 */
601 raw_spin_lock_irqsave(&pmu_lock, flags); 622 raw_spin_lock_irqsave(&events->pmu_lock, flags);
602 val = armv6_pmcr_read(); 623 val = armv6_pmcr_read();
603 val &= ~mask; 624 val &= ~mask;
604 val |= evt; 625 val |= evt;
605 armv6_pmcr_write(val); 626 armv6_pmcr_write(val);
606 raw_spin_unlock_irqrestore(&pmu_lock, flags); 627 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
607} 628}
608 629
609static void 630static void
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
611 int idx) 632 int idx)
612{ 633{
613 unsigned long val, mask, flags, evt = 0; 634 unsigned long val, mask, flags, evt = 0;
635 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
614 636
615 if (ARMV6_CYCLE_COUNTER == idx) { 637 if (ARMV6_CYCLE_COUNTER == idx) {
616 mask = ARMV6_PMCR_CCOUNT_IEN; 638 mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
627 * Unlike UP ARMv6, we don't have a way of stopping the counters. We 649 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
628 * simply disable the interrupt reporting. 650 * simply disable the interrupt reporting.
629 */ 651 */
630 raw_spin_lock_irqsave(&pmu_lock, flags); 652 raw_spin_lock_irqsave(&events->pmu_lock, flags);
631 val = armv6_pmcr_read(); 653 val = armv6_pmcr_read();
632 val &= ~mask; 654 val &= ~mask;
633 val |= evt; 655 val |= evt;
634 armv6_pmcr_write(val); 656 armv6_pmcr_write(val);
635 raw_spin_unlock_irqrestore(&pmu_lock, flags); 657 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
658}
659
660static int armv6_map_event(struct perf_event *event)
661{
662 return map_cpu_event(event, &armv6_perf_map,
663 &armv6_perf_cache_map, 0xFF);
636} 664}
637 665
638static const struct arm_pmu armv6pmu = { 666static struct arm_pmu armv6pmu = {
639 .id = ARM_PERF_PMU_ID_V6, 667 .id = ARM_PERF_PMU_ID_V6,
640 .name = "v6", 668 .name = "v6",
641 .handle_irq = armv6pmu_handle_irq, 669 .handle_irq = armv6pmu_handle_irq,
@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
646 .get_event_idx = armv6pmu_get_event_idx, 674 .get_event_idx = armv6pmu_get_event_idx,
647 .start = armv6pmu_start, 675 .start = armv6pmu_start,
648 .stop = armv6pmu_stop, 676 .stop = armv6pmu_stop,
649 .cache_map = &armv6_perf_cache_map, 677 .map_event = armv6_map_event,
650 .event_map = &armv6_perf_map,
651 .raw_event_mask = 0xFF,
652 .num_events = 3, 678 .num_events = 3,
653 .max_period = (1LLU << 32) - 1, 679 .max_period = (1LLU << 32) - 1,
654}; 680};
655 681
656static const struct arm_pmu *__init armv6pmu_init(void) 682static struct arm_pmu *__init armv6pmu_init(void)
657{ 683{
658 return &armv6pmu; 684 return &armv6pmu;
659} 685}
@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
665 * disable the interrupt reporting and update the event. When unthrottling we 691 * disable the interrupt reporting and update the event. When unthrottling we
666 * reset the period and enable the interrupt reporting. 692 * reset the period and enable the interrupt reporting.
667 */ 693 */
668static const struct arm_pmu armv6mpcore_pmu = { 694
695static int armv6mpcore_map_event(struct perf_event *event)
696{
697 return map_cpu_event(event, &armv6mpcore_perf_map,
698 &armv6mpcore_perf_cache_map, 0xFF);
699}
700
701static struct arm_pmu armv6mpcore_pmu = {
669 .id = ARM_PERF_PMU_ID_V6MP, 702 .id = ARM_PERF_PMU_ID_V6MP,
670 .name = "v6mpcore", 703 .name = "v6mpcore",
671 .handle_irq = armv6pmu_handle_irq, 704 .handle_irq = armv6pmu_handle_irq,
@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
676 .get_event_idx = armv6pmu_get_event_idx, 709 .get_event_idx = armv6pmu_get_event_idx,
677 .start = armv6pmu_start, 710 .start = armv6pmu_start,
678 .stop = armv6pmu_stop, 711 .stop = armv6pmu_stop,
679 .cache_map = &armv6mpcore_perf_cache_map, 712 .map_event = armv6mpcore_map_event,
680 .event_map = &armv6mpcore_perf_map,
681 .raw_event_mask = 0xFF,
682 .num_events = 3, 713 .num_events = 3,
683 .max_period = (1LLU << 32) - 1, 714 .max_period = (1LLU << 32) - 1,
684}; 715};
685 716
686static const struct arm_pmu *__init armv6mpcore_pmu_init(void) 717static struct arm_pmu *__init armv6mpcore_pmu_init(void)
687{ 718{
688 return &armv6mpcore_pmu; 719 return &armv6mpcore_pmu;
689} 720}
690#else 721#else
691static const struct arm_pmu *__init armv6pmu_init(void) 722static struct arm_pmu *__init armv6pmu_init(void)
692{ 723{
693 return NULL; 724 return NULL;
694} 725}
695 726
696static const struct arm_pmu *__init armv6mpcore_pmu_init(void) 727static struct arm_pmu *__init armv6mpcore_pmu_init(void)
697{ 728{
698 return NULL; 729 return NULL;
699} 730}
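Throughout this series the cache_map/event_map/raw_event_mask triple collapses into a single map_event callback, with map_cpu_event() centralising the dispatch. A hedged sketch of the translation such a hook performs (my_perf_map and its contents are illustrative):

static const unsigned my_perf_map[PERF_COUNT_HW_MAX] = {
	/* generic event -> hardware event number, per CPU */
};

static int my_map_event(struct perf_event *event)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		return my_perf_map[config];
	case PERF_TYPE_RAW:
		return config & 0xFF;	/* the old raw_event_mask */
	default:
		return -ENOENT;
	}
}
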
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68e..98b75738345e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,6 +17,9 @@
17 */ 17 */
18 18
19#ifdef CONFIG_CPU_V7 19#ifdef CONFIG_CPU_V7
20
21static struct arm_pmu armv7pmu;
22
20/* 23/*
21 * Common ARMv7 event types 24 * Common ARMv7 event types
22 * 25 *
@@ -676,23 +679,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
676}; 679};
677 680
678/* 681/*
679 * Perf Events counters 682 * Perf Events' indices
680 */ 683 */
681enum armv7_counters { 684#define ARMV7_IDX_CYCLE_COUNTER 0
682 ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ 685#define ARMV7_IDX_COUNTER0 1
683 ARMV7_COUNTER0 = 2, /* First event counter */ 686#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
684}; 687
688#define ARMV7_MAX_COUNTERS 32
689#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
685 690
686/* 691/*
687 * The cycle counter is ARMV7_CYCLE_COUNTER. 692 * ARMv7 low level PMNC access
688 * The first event counter is ARMV7_COUNTER0.
689 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
690 */ 693 */
691#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
692 694
693/* 695/*
694 * ARMv7 low level PMNC access 696 * Perf Event to low level counters mapping
695 */ 697 */
698#define ARMV7_IDX_TO_COUNTER(x) \
699 (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
696 700
697/* 701/*
698 * Per-CPU PMNC: config reg 702 * Per-CPU PMNC: config reg
@@ -708,103 +712,76 @@ enum armv7_counters {
708#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ 712#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
709 713
710/* 714/*
711 * Available counters 715 * FLAG: counters overflow flag status reg
712 */
713#define ARMV7_CNT0 0 /* First event counter */
714#define ARMV7_CCNT 31 /* Cycle counter */
715
716/* Perf Event to low level counters mapping */
717#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
718
719/*
720 * CNTENS: counters enable reg
721 */
722#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
723#define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
724
725/*
726 * CNTENC: counters disable reg
727 */
728#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
729#define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
730
731/*
732 * INTENS: counters overflow interrupt enable reg
733 */
734#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
735#define ARMV7_INTENS_C (1 << ARMV7_CCNT)
736
737/*
738 * INTENC: counters overflow interrupt disable reg
739 */
740#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
741#define ARMV7_INTENC_C (1 << ARMV7_CCNT)
742
743/*
744 * EVTSEL: Event selection reg
745 */ 716 */
746#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ 717#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
718#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
747 719
748/* 720/*
749 * SELECT: Counter selection reg 721 * PMXEVTYPER: Event selection reg
750 */ 722 */
751#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ 723#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
724#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
752 725
753/* 726/*
754 * FLAG: counters overflow flag status reg 727 * Event filters for PMUv2
755 */ 728 */
756#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) 729#define ARMV7_EXCLUDE_PL1 (1 << 31)
757#define ARMV7_FLAG_C (1 << ARMV7_CCNT) 730#define ARMV7_EXCLUDE_USER (1 << 30)
758#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ 731#define ARMV7_INCLUDE_HYP (1 << 27)
759#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
760 732
761static inline unsigned long armv7_pmnc_read(void) 733static inline u32 armv7_pmnc_read(void)
762{ 734{
763 u32 val; 735 u32 val;
764 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); 736 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
765 return val; 737 return val;
766} 738}
767 739
768static inline void armv7_pmnc_write(unsigned long val) 740static inline void armv7_pmnc_write(u32 val)
769{ 741{
770 val &= ARMV7_PMNC_MASK; 742 val &= ARMV7_PMNC_MASK;
771 isb(); 743 isb();
772 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); 744 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
773} 745}
774 746
775static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) 747static inline int armv7_pmnc_has_overflowed(u32 pmnc)
776{ 748{
777 return pmnc & ARMV7_OVERFLOWED_MASK; 749 return pmnc & ARMV7_OVERFLOWED_MASK;
778} 750}
779 751
780static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, 752static inline int armv7_pmnc_counter_valid(int idx)
781 enum armv7_counters counter) 753{
754 return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
755}
756
757static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
782{ 758{
783 int ret = 0; 759 int ret = 0;
760 u32 counter;
784 761
785 if (counter == ARMV7_CYCLE_COUNTER) 762 if (!armv7_pmnc_counter_valid(idx)) {
786 ret = pmnc & ARMV7_FLAG_C;
787 else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
788 ret = pmnc & ARMV7_FLAG_P(counter);
789 else
790 pr_err("CPU%u checking wrong counter %d overflow status\n", 763 pr_err("CPU%u checking wrong counter %d overflow status\n",
791 smp_processor_id(), counter); 764 smp_processor_id(), idx);
765 } else {
766 counter = ARMV7_IDX_TO_COUNTER(idx);
767 ret = pmnc & BIT(counter);
768 }
792 769
793 return ret; 770 return ret;
794} 771}
795 772
796static inline int armv7_pmnc_select_counter(unsigned int idx) 773static inline int armv7_pmnc_select_counter(int idx)
797{ 774{
798 u32 val; 775 u32 counter;
799 776
800 if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { 777 if (!armv7_pmnc_counter_valid(idx)) {
801 pr_err("CPU%u selecting wrong PMNC counter" 778 pr_err("CPU%u selecting wrong PMNC counter %d\n",
802 " %d\n", smp_processor_id(), idx); 779 smp_processor_id(), idx);
803 return -1; 780 return -EINVAL;
804 } 781 }
805 782
806 val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; 783 counter = ARMV7_IDX_TO_COUNTER(idx);
807 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); 784 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
808 isb(); 785 isb();
809 786
810 return idx; 787 return idx;
@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
812 789
813static inline u32 armv7pmu_read_counter(int idx) 790static inline u32 armv7pmu_read_counter(int idx)
814{ 791{
815 unsigned long value = 0; 792 u32 value = 0;
816 793
817 if (idx == ARMV7_CYCLE_COUNTER) 794 if (!armv7_pmnc_counter_valid(idx))
818 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
819 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
820 if (armv7_pmnc_select_counter(idx) == idx)
821 asm volatile("mrc p15, 0, %0, c9, c13, 2"
822 : "=r" (value));
823 } else
824 pr_err("CPU%u reading wrong counter %d\n", 795 pr_err("CPU%u reading wrong counter %d\n",
825 smp_processor_id(), idx); 796 smp_processor_id(), idx);
797 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
798 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
799 else if (armv7_pmnc_select_counter(idx) == idx)
800 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
826 801
827 return value; 802 return value;
828} 803}
829 804
830static inline void armv7pmu_write_counter(int idx, u32 value) 805static inline void armv7pmu_write_counter(int idx, u32 value)
831{ 806{
832 if (idx == ARMV7_CYCLE_COUNTER) 807 if (!armv7_pmnc_counter_valid(idx))
833 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
834 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
835 if (armv7_pmnc_select_counter(idx) == idx)
836 asm volatile("mcr p15, 0, %0, c9, c13, 2"
837 : : "r" (value));
838 } else
839 pr_err("CPU%u writing wrong counter %d\n", 808 pr_err("CPU%u writing wrong counter %d\n",
840 smp_processor_id(), idx); 809 smp_processor_id(), idx);
810 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
811 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
812 else if (armv7_pmnc_select_counter(idx) == idx)
813 asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
841} 814}
842 815
843static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) 816static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
844{ 817{
845 if (armv7_pmnc_select_counter(idx) == idx) { 818 if (armv7_pmnc_select_counter(idx) == idx) {
846 val &= ARMV7_EVTSEL_MASK; 819 val &= ARMV7_EVTYPE_MASK;
847 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); 820 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
848 } 821 }
849} 822}
850 823
851static inline u32 armv7_pmnc_enable_counter(unsigned int idx) 824static inline int armv7_pmnc_enable_counter(int idx)
852{ 825{
853 u32 val; 826 u32 counter;
854 827
855 if ((idx != ARMV7_CYCLE_COUNTER) && 828 if (!armv7_pmnc_counter_valid(idx)) {
856 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { 829 pr_err("CPU%u enabling wrong PMNC counter %d\n",
857 pr_err("CPU%u enabling wrong PMNC counter" 830 smp_processor_id(), idx);
858 " %d\n", smp_processor_id(), idx); 831 return -EINVAL;
859 return -1;
860 } 832 }
861 833
862 if (idx == ARMV7_CYCLE_COUNTER) 834 counter = ARMV7_IDX_TO_COUNTER(idx);
863 val = ARMV7_CNTENS_C; 835 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
864 else
865 val = ARMV7_CNTENS_P(idx);
866
867 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
868
869 return idx; 836 return idx;
870} 837}
871 838
872static inline u32 armv7_pmnc_disable_counter(unsigned int idx) 839static inline int armv7_pmnc_disable_counter(int idx)
873{ 840{
874 u32 val; 841 u32 counter;
875
876 842
877 if ((idx != ARMV7_CYCLE_COUNTER) && 843 if (!armv7_pmnc_counter_valid(idx)) {
878 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { 844 pr_err("CPU%u disabling wrong PMNC counter %d\n",
879 pr_err("CPU%u disabling wrong PMNC counter" 845 smp_processor_id(), idx);
880 " %d\n", smp_processor_id(), idx); 846 return -EINVAL;
881 return -1;
882 } 847 }
883 848
884 if (idx == ARMV7_CYCLE_COUNTER) 849 counter = ARMV7_IDX_TO_COUNTER(idx);
885 val = ARMV7_CNTENC_C; 850 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
886 else
887 val = ARMV7_CNTENC_P(idx);
888
889 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
890
891 return idx; 851 return idx;
892} 852}
893 853
894static inline u32 armv7_pmnc_enable_intens(unsigned int idx) 854static inline int armv7_pmnc_enable_intens(int idx)
895{ 855{
896 u32 val; 856 u32 counter;
897 857
898 if ((idx != ARMV7_CYCLE_COUNTER) && 858 if (!armv7_pmnc_counter_valid(idx)) {
899 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { 859 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
900 pr_err("CPU%u enabling wrong PMNC counter" 860 smp_processor_id(), idx);
901 " interrupt enable %d\n", smp_processor_id(), idx); 861 return -EINVAL;
902 return -1;
903 } 862 }
904 863
905 if (idx == ARMV7_CYCLE_COUNTER) 864 counter = ARMV7_IDX_TO_COUNTER(idx);
906 val = ARMV7_INTENS_C; 865 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
907 else
908 val = ARMV7_INTENS_P(idx);
909
910 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
911
912 return idx; 866 return idx;
913} 867}
914 868
915static inline u32 armv7_pmnc_disable_intens(unsigned int idx) 869static inline int armv7_pmnc_disable_intens(int idx)
916{ 870{
917 u32 val; 871 u32 counter;
918 872
919 if ((idx != ARMV7_CYCLE_COUNTER) && 873 if (!armv7_pmnc_counter_valid(idx)) {
920 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { 874 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
921 pr_err("CPU%u disabling wrong PMNC counter" 875 smp_processor_id(), idx);
922 " interrupt enable %d\n", smp_processor_id(), idx); 876 return -EINVAL;
923 return -1;
924 } 877 }
925 878
926 if (idx == ARMV7_CYCLE_COUNTER) 879 counter = ARMV7_IDX_TO_COUNTER(idx);
927 val = ARMV7_INTENC_C; 880 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
928 else
929 val = ARMV7_INTENC_P(idx);
930
931 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
932
933 return idx; 881 return idx;
934} 882}
935 883
@@ -973,14 +921,14 @@ static void armv7_pmnc_dump_regs(void)
973 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); 921 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
974 printk(KERN_INFO "CCNT =0x%08x\n", val); 922 printk(KERN_INFO "CCNT =0x%08x\n", val);
975 923
976 for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { 924 for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
977 armv7_pmnc_select_counter(cnt); 925 armv7_pmnc_select_counter(cnt);
978 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); 926 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
979 printk(KERN_INFO "CNT[%d] count =0x%08x\n", 927 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
980 cnt-ARMV7_EVENT_CNT_TO_CNTx, val); 928 ARMV7_IDX_TO_COUNTER(cnt), val);
981 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); 929 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
982 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", 930 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
983 cnt-ARMV7_EVENT_CNT_TO_CNTx, val); 931 ARMV7_IDX_TO_COUNTER(cnt), val);
984 } 932 }
985} 933}
986#endif 934#endif
@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
988static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) 936static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
989{ 937{
990 unsigned long flags; 938 unsigned long flags;
939 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
991 940
992 /* 941 /*
993 * Enable counter and interrupt, and set the counter to count 942 * Enable counter and interrupt, and set the counter to count
994 * the event that we're interested in. 943 * the event that we're interested in.
995 */ 944 */
996 raw_spin_lock_irqsave(&pmu_lock, flags); 945 raw_spin_lock_irqsave(&events->pmu_lock, flags);
997 946
998 /* 947 /*
999 * Disable counter 948 * Disable counter
@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1002 951
1003 /* 952 /*
1004 * Set event (if destined for PMNx counters) 953 * Set event (if destined for PMNx counters)
1005 * We don't need to set the event if it's a cycle count 954 * We only need to set the event for the cycle counter if we
955 * have the ability to perform event filtering.
1006 */ 956 */
1007 if (idx != ARMV7_CYCLE_COUNTER) 957 if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1008 armv7_pmnc_write_evtsel(idx, hwc->config_base); 958 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1009 959
1010 /* 960 /*
@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1017 */ 967 */
1018 armv7_pmnc_enable_counter(idx); 968 armv7_pmnc_enable_counter(idx);
1019 969
1020 raw_spin_unlock_irqrestore(&pmu_lock, flags); 970 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1021} 971}
1022 972
1023static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) 973static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1024{ 974{
1025 unsigned long flags; 975 unsigned long flags;
976 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1026 977
1027 /* 978 /*
1028 * Disable counter and interrupt 979 * Disable counter and interrupt
1029 */ 980 */
1030 raw_spin_lock_irqsave(&pmu_lock, flags); 981 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1031 982
1032 /* 983 /*
1033 * Disable counter 984 * Disable counter
@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1039 */ 990 */
1040 armv7_pmnc_disable_intens(idx); 991 armv7_pmnc_disable_intens(idx);
1041 992
1042 raw_spin_unlock_irqrestore(&pmu_lock, flags); 993 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1043} 994}
1044 995
1045static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) 996static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1046{ 997{
1047 unsigned long pmnc; 998 u32 pmnc;
1048 struct perf_sample_data data; 999 struct perf_sample_data data;
1049 struct cpu_hw_events *cpuc; 1000 struct pmu_hw_events *cpuc;
1050 struct pt_regs *regs; 1001 struct pt_regs *regs;
1051 int idx; 1002 int idx;
1052 1003
@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1069 perf_sample_data_init(&data, 0); 1020 perf_sample_data_init(&data, 0);
1070 1021
1071 cpuc = &__get_cpu_var(cpu_hw_events); 1022 cpuc = &__get_cpu_var(cpu_hw_events);
1072 for (idx = 0; idx <= armpmu->num_events; ++idx) { 1023 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1073 struct perf_event *event = cpuc->events[idx]; 1024 struct perf_event *event = cpuc->events[idx];
1074 struct hw_perf_event *hwc; 1025 struct hw_perf_event *hwc;
1075 1026
1076 if (!test_bit(idx, cpuc->active_mask))
1077 continue;
1078
1079 /* 1027 /*
1080 * We have a single interrupt for all counters. Check that 1028 * We have a single interrupt for all counters. Check that
1081 * each counter has overflowed before we process it. 1029 * each counter has overflowed before we process it.
@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1090 continue; 1038 continue;
1091 1039
1092 if (perf_event_overflow(event, &data, regs)) 1040 if (perf_event_overflow(event, &data, regs))
1093 armpmu->disable(hwc, idx); 1041 cpu_pmu->disable(hwc, idx);
1094 } 1042 }
1095 1043
1096 /* 1044 /*
@@ -1108,61 +1056,114 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1108static void armv7pmu_start(void) 1056static void armv7pmu_start(void)
1109{ 1057{
1110 unsigned long flags; 1058 unsigned long flags;
1059 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1111 1060
1112 raw_spin_lock_irqsave(&pmu_lock, flags); 1061 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1113 /* Enable all counters */ 1062 /* Enable all counters */
1114 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); 1063 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1115 raw_spin_unlock_irqrestore(&pmu_lock, flags); 1064 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1116} 1065}
1117 1066
1118static void armv7pmu_stop(void) 1067static void armv7pmu_stop(void)
1119{ 1068{
1120 unsigned long flags; 1069 unsigned long flags;
1070 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1121 1071
1122 raw_spin_lock_irqsave(&pmu_lock, flags); 1072 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1123 /* Disable all counters */ 1073 /* Disable all counters */
1124 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); 1074 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1125 raw_spin_unlock_irqrestore(&pmu_lock, flags); 1075 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1126} 1076}
1127 1077
1128static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, 1078static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1129 struct hw_perf_event *event) 1079 struct hw_perf_event *event)
1130{ 1080{
1131 int idx; 1081 int idx;
1082 unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
1132 1083
1133 /* Always place a cycle counter into the cycle counter. */ 1084 /* Always place a cycle counter into the cycle counter. */
1134 if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { 1085 if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1135 if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) 1086 if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1136 return -EAGAIN; 1087 return -EAGAIN;
1137 1088
1138 return ARMV7_CYCLE_COUNTER; 1089 return ARMV7_IDX_CYCLE_COUNTER;
1139 } else { 1090 }
1140 /*
1141 * For anything other than a cycle counter, try and use
1142 * the events counters
1143 */
1144 for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
1145 if (!test_and_set_bit(idx, cpuc->used_mask))
1146 return idx;
1147 }
1148 1091
1149 /* The counters are all in use. */ 1092 /*
1150 return -EAGAIN; 1093 * For anything other than a cycle counter, try and use
1094 * the events counters
1095 */
1096 for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1097 if (!test_and_set_bit(idx, cpuc->used_mask))
1098 return idx;
1151 } 1099 }
1100
1101 /* The counters are all in use. */
1102 return -EAGAIN;
1103}
1104
1105/*
1106 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1107 */
1108static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1109 struct perf_event_attr *attr)
1110{
1111 unsigned long config_base = 0;
1112
1113 if (attr->exclude_idle)
1114 return -EPERM;
1115 if (attr->exclude_user)
1116 config_base |= ARMV7_EXCLUDE_USER;
1117 if (attr->exclude_kernel)
1118 config_base |= ARMV7_EXCLUDE_PL1;
1119 if (!attr->exclude_hv)
1120 config_base |= ARMV7_INCLUDE_HYP;
1121
1122 /*
1123 * Install the filter into config_base as this is used to
1124 * construct the event type.
1125 */
1126 event->config_base = config_base;
1127
1128 return 0;
1152} 1129}
1153 1130
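armv7pmu_set_event_filter() is driven by the exclude_* bits userspace sets on the event attribute. A runnable userspace sketch that exercises it: counting cycles with the kernel excluded, which the code above translates into ARMV7_EXCLUDE_PL1 (and, since exclude_hv is set, ARMV7_INCLUDE_HYP stays clear):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int open_user_cycles(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;	/* -> ARMV7_EXCLUDE_PL1 */
	attr.exclude_hv = 1;		/* keep ARMV7_INCLUDE_HYP clear */

	/* fd counts cycles for the calling thread on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
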
1154static void armv7pmu_reset(void *info) 1131static void armv7pmu_reset(void *info)
1155{ 1132{
1156 u32 idx, nb_cnt = armpmu->num_events; 1133 u32 idx, nb_cnt = cpu_pmu->num_events;
1157 1134
1158 /* The counter and interrupt enable registers are unknown at reset. */ 1135 /* The counter and interrupt enable registers are unknown at reset. */
1159 for (idx = 1; idx < nb_cnt; ++idx) 1136 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
1160 armv7pmu_disable_event(NULL, idx); 1137 armv7pmu_disable_event(NULL, idx);
1161 1138
1162 /* Initialize & Reset PMNC: C and P bits */ 1139 /* Initialize & Reset PMNC: C and P bits */
1163 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); 1140 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1164} 1141}
1165 1142
1143static int armv7_a8_map_event(struct perf_event *event)
1144{
1145 return map_cpu_event(event, &armv7_a8_perf_map,
1146 &armv7_a8_perf_cache_map, 0xFF);
1147}
1148
1149static int armv7_a9_map_event(struct perf_event *event)
1150{
1151 return map_cpu_event(event, &armv7_a9_perf_map,
1152 &armv7_a9_perf_cache_map, 0xFF);
1153}
1154
1155static int armv7_a5_map_event(struct perf_event *event)
1156{
1157 return map_cpu_event(event, &armv7_a5_perf_map,
1158 &armv7_a5_perf_cache_map, 0xFF);
1159}
1160
1161static int armv7_a15_map_event(struct perf_event *event)
1162{
1163 return map_cpu_event(event, &armv7_a15_perf_map,
1164 &armv7_a15_perf_cache_map, 0xFF);
1165}
1166
1166static struct arm_pmu armv7pmu = { 1167static struct arm_pmu armv7pmu = {
1167 .handle_irq = armv7pmu_handle_irq, 1168 .handle_irq = armv7pmu_handle_irq,
1168 .enable = armv7pmu_enable_event, 1169 .enable = armv7pmu_enable_event,
@@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
1173 .start = armv7pmu_start, 1174 .start = armv7pmu_start,
1174 .stop = armv7pmu_stop, 1175 .stop = armv7pmu_stop,
1175 .reset = armv7pmu_reset, 1176 .reset = armv7pmu_reset,
1176 .raw_event_mask = 0xFF,
1177 .max_period = (1LLU << 32) - 1, 1177 .max_period = (1LLU << 32) - 1,
1178}; 1178};
1179 1179
@@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void)
1188 return nb_cnt + 1; 1188 return nb_cnt + 1;
1189} 1189}
1190 1190
1191static const struct arm_pmu *__init armv7_a8_pmu_init(void) 1191static struct arm_pmu *__init armv7_a8_pmu_init(void)
1192{ 1192{
1193 armv7pmu.id = ARM_PERF_PMU_ID_CA8; 1193 armv7pmu.id = ARM_PERF_PMU_ID_CA8;
1194 armv7pmu.name = "ARMv7 Cortex-A8"; 1194 armv7pmu.name = "ARMv7 Cortex-A8";
1195 armv7pmu.cache_map = &armv7_a8_perf_cache_map; 1195 armv7pmu.map_event = armv7_a8_map_event;
1196 armv7pmu.event_map = &armv7_a8_perf_map;
1197 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1196 armv7pmu.num_events = armv7_read_num_pmnc_events();
1198 return &armv7pmu; 1197 return &armv7pmu;
1199} 1198}
1200 1199
1201static const struct arm_pmu *__init armv7_a9_pmu_init(void) 1200static struct arm_pmu *__init armv7_a9_pmu_init(void)
1202{ 1201{
1203 armv7pmu.id = ARM_PERF_PMU_ID_CA9; 1202 armv7pmu.id = ARM_PERF_PMU_ID_CA9;
1204 armv7pmu.name = "ARMv7 Cortex-A9"; 1203 armv7pmu.name = "ARMv7 Cortex-A9";
1205 armv7pmu.cache_map = &armv7_a9_perf_cache_map; 1204 armv7pmu.map_event = armv7_a9_map_event;
1206 armv7pmu.event_map = &armv7_a9_perf_map;
1207 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1205 armv7pmu.num_events = armv7_read_num_pmnc_events();
1208 return &armv7pmu; 1206 return &armv7pmu;
1209} 1207}
1210 1208
1211static const struct arm_pmu *__init armv7_a5_pmu_init(void) 1209static struct arm_pmu *__init armv7_a5_pmu_init(void)
1212{ 1210{
1213 armv7pmu.id = ARM_PERF_PMU_ID_CA5; 1211 armv7pmu.id = ARM_PERF_PMU_ID_CA5;
1214 armv7pmu.name = "ARMv7 Cortex-A5"; 1212 armv7pmu.name = "ARMv7 Cortex-A5";
1215 armv7pmu.cache_map = &armv7_a5_perf_cache_map; 1213 armv7pmu.map_event = armv7_a5_map_event;
1216 armv7pmu.event_map = &armv7_a5_perf_map;
1217 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1214 armv7pmu.num_events = armv7_read_num_pmnc_events();
1218 return &armv7pmu; 1215 return &armv7pmu;
1219} 1216}
1220 1217
1221static const struct arm_pmu *__init armv7_a15_pmu_init(void) 1218static struct arm_pmu *__init armv7_a15_pmu_init(void)
1222{ 1219{
1223 armv7pmu.id = ARM_PERF_PMU_ID_CA15; 1220 armv7pmu.id = ARM_PERF_PMU_ID_CA15;
1224 armv7pmu.name = "ARMv7 Cortex-A15"; 1221 armv7pmu.name = "ARMv7 Cortex-A15";
1225 armv7pmu.cache_map = &armv7_a15_perf_cache_map; 1222 armv7pmu.map_event = armv7_a15_map_event;
1226 armv7pmu.event_map = &armv7_a15_perf_map;
1227 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1223 armv7pmu.num_events = armv7_read_num_pmnc_events();
1224 armv7pmu.set_event_filter = armv7pmu_set_event_filter;
1228 return &armv7pmu; 1225 return &armv7pmu;
1229} 1226}
1230#else 1227#else
1231static const struct arm_pmu *__init armv7_a8_pmu_init(void) 1228static struct arm_pmu *__init armv7_a8_pmu_init(void)
1232{ 1229{
1233 return NULL; 1230 return NULL;
1234} 1231}
1235 1232
1236static const struct arm_pmu *__init armv7_a9_pmu_init(void) 1233static struct arm_pmu *__init armv7_a9_pmu_init(void)
1237{ 1234{
1238 return NULL; 1235 return NULL;
1239} 1236}
1240 1237
1241static const struct arm_pmu *__init armv7_a5_pmu_init(void) 1238static struct arm_pmu *__init armv7_a5_pmu_init(void)
1242{ 1239{
1243 return NULL; 1240 return NULL;
1244} 1241}
1245 1242
1246static const struct arm_pmu *__init armv7_a15_pmu_init(void) 1243static struct arm_pmu *__init armv7_a15_pmu_init(void)
1247{ 1244{
1248 return NULL; 1245 return NULL;
1249} 1246}
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3c4397491d08..e0cca10a8411 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -40,7 +40,7 @@ enum xscale_perf_types {
40}; 40};
41 41
42enum xscale_counters { 42enum xscale_counters {
43 XSCALE_CYCLE_COUNTER = 1, 43 XSCALE_CYCLE_COUNTER = 0,
44 XSCALE_COUNTER0, 44 XSCALE_COUNTER0,
45 XSCALE_COUNTER1, 45 XSCALE_COUNTER1,
46 XSCALE_COUNTER2, 46 XSCALE_COUNTER2,
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
222{ 222{
223 unsigned long pmnc; 223 unsigned long pmnc;
224 struct perf_sample_data data; 224 struct perf_sample_data data;
225 struct cpu_hw_events *cpuc; 225 struct pmu_hw_events *cpuc;
226 struct pt_regs *regs; 226 struct pt_regs *regs;
227 int idx; 227 int idx;
228 228
@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
249 perf_sample_data_init(&data, 0); 249 perf_sample_data_init(&data, 0);
250 250
251 cpuc = &__get_cpu_var(cpu_hw_events); 251 cpuc = &__get_cpu_var(cpu_hw_events);
252 for (idx = 0; idx <= armpmu->num_events; ++idx) { 252 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
253 struct perf_event *event = cpuc->events[idx]; 253 struct perf_event *event = cpuc->events[idx];
254 struct hw_perf_event *hwc; 254 struct hw_perf_event *hwc;
255 255
256 if (!test_bit(idx, cpuc->active_mask))
257 continue;
258
259 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) 256 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
260 continue; 257 continue;
261 258
@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
266 continue; 263 continue;
267 264
268 if (perf_event_overflow(event, &data, regs)) 265 if (perf_event_overflow(event, &data, regs))
269 armpmu->disable(hwc, idx); 266 cpu_pmu->disable(hwc, idx);
270 } 267 }
271 268
272 irq_work_run(); 269 irq_work_run();
@@ -284,6 +281,7 @@ static void
284xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) 281xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
285{ 282{
286 unsigned long val, mask, evt, flags; 283 unsigned long val, mask, evt, flags;
284 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
287 285
288 switch (idx) { 286 switch (idx) {
289 case XSCALE_CYCLE_COUNTER: 287 case XSCALE_CYCLE_COUNTER:
@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
305 return; 303 return;
306 } 304 }
307 305
308 raw_spin_lock_irqsave(&pmu_lock, flags); 306 raw_spin_lock_irqsave(&events->pmu_lock, flags);
309 val = xscale1pmu_read_pmnc(); 307 val = xscale1pmu_read_pmnc();
310 val &= ~mask; 308 val &= ~mask;
311 val |= evt; 309 val |= evt;
312 xscale1pmu_write_pmnc(val); 310 xscale1pmu_write_pmnc(val);
313 raw_spin_unlock_irqrestore(&pmu_lock, flags); 311 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
314} 312}
315 313
316static void 314static void
317xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) 315xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
318{ 316{
319 unsigned long val, mask, evt, flags; 317 unsigned long val, mask, evt, flags;
318 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
320 319
321 switch (idx) { 320 switch (idx) {
322 case XSCALE_CYCLE_COUNTER: 321 case XSCALE_CYCLE_COUNTER:
@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
336 return; 335 return;
337 } 336 }
338 337
339 raw_spin_lock_irqsave(&pmu_lock, flags); 338 raw_spin_lock_irqsave(&events->pmu_lock, flags);
340 val = xscale1pmu_read_pmnc(); 339 val = xscale1pmu_read_pmnc();
341 val &= ~mask; 340 val &= ~mask;
342 val |= evt; 341 val |= evt;
343 xscale1pmu_write_pmnc(val); 342 xscale1pmu_write_pmnc(val);
344 raw_spin_unlock_irqrestore(&pmu_lock, flags); 343 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
345} 344}
346 345
347static int 346static int
348xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc, 347xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
349 struct hw_perf_event *event) 348 struct hw_perf_event *event)
350{ 349{
351 if (XSCALE_PERFCTR_CCNT == event->config_base) { 350 if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -368,24 +367,26 @@ static void
368xscale1pmu_start(void) 367xscale1pmu_start(void)
369{ 368{
370 unsigned long flags, val; 369 unsigned long flags, val;
370 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
371 371
372 raw_spin_lock_irqsave(&pmu_lock, flags); 372 raw_spin_lock_irqsave(&events->pmu_lock, flags);
373 val = xscale1pmu_read_pmnc(); 373 val = xscale1pmu_read_pmnc();
374 val |= XSCALE_PMU_ENABLE; 374 val |= XSCALE_PMU_ENABLE;
375 xscale1pmu_write_pmnc(val); 375 xscale1pmu_write_pmnc(val);
376 raw_spin_unlock_irqrestore(&pmu_lock, flags); 376 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
377} 377}
378 378
379static void 379static void
380xscale1pmu_stop(void) 380xscale1pmu_stop(void)
381{ 381{
382 unsigned long flags, val; 382 unsigned long flags, val;
383 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
383 384
384 raw_spin_lock_irqsave(&pmu_lock, flags); 385 raw_spin_lock_irqsave(&events->pmu_lock, flags);
385 val = xscale1pmu_read_pmnc(); 386 val = xscale1pmu_read_pmnc();
386 val &= ~XSCALE_PMU_ENABLE; 387 val &= ~XSCALE_PMU_ENABLE;
387 xscale1pmu_write_pmnc(val); 388 xscale1pmu_write_pmnc(val);
388 raw_spin_unlock_irqrestore(&pmu_lock, flags); 389 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
389} 390}
390 391
391static inline u32 392static inline u32
@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
424 } 425 }
425} 426}
426 427
427static const struct arm_pmu xscale1pmu = { 428static int xscale_map_event(struct perf_event *event)
429{
430 return map_cpu_event(event, &xscale_perf_map,
431 &xscale_perf_cache_map, 0xFF);
432}
433
434static struct arm_pmu xscale1pmu = {
428 .id = ARM_PERF_PMU_ID_XSCALE1, 435 .id = ARM_PERF_PMU_ID_XSCALE1,
429 .name = "xscale1", 436 .name = "xscale1",
430 .handle_irq = xscale1pmu_handle_irq, 437 .handle_irq = xscale1pmu_handle_irq,
@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
435 .get_event_idx = xscale1pmu_get_event_idx, 442 .get_event_idx = xscale1pmu_get_event_idx,
436 .start = xscale1pmu_start, 443 .start = xscale1pmu_start,
437 .stop = xscale1pmu_stop, 444 .stop = xscale1pmu_stop,
438 .cache_map = &xscale_perf_cache_map, 445 .map_event = xscale_map_event,
439 .event_map = &xscale_perf_map,
440 .raw_event_mask = 0xFF,
441 .num_events = 3, 446 .num_events = 3,
442 .max_period = (1LLU << 32) - 1, 447 .max_period = (1LLU << 32) - 1,
443}; 448};
444 449
445static const struct arm_pmu *__init xscale1pmu_init(void) 450static struct arm_pmu *__init xscale1pmu_init(void)
446{ 451{
447 return &xscale1pmu; 452 return &xscale1pmu;
448} 453}
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
560{ 565{
561 unsigned long pmnc, of_flags; 566 unsigned long pmnc, of_flags;
562 struct perf_sample_data data; 567 struct perf_sample_data data;
563 struct cpu_hw_events *cpuc; 568 struct pmu_hw_events *cpuc;
564 struct pt_regs *regs; 569 struct pt_regs *regs;
565 int idx; 570 int idx;
566 571
@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
581 perf_sample_data_init(&data, 0); 586 perf_sample_data_init(&data, 0);
582 587
583 cpuc = &__get_cpu_var(cpu_hw_events); 588 cpuc = &__get_cpu_var(cpu_hw_events);
584 for (idx = 0; idx <= armpmu->num_events; ++idx) { 589 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
585 struct perf_event *event = cpuc->events[idx]; 590 struct perf_event *event = cpuc->events[idx];
586 struct hw_perf_event *hwc; 591 struct hw_perf_event *hwc;
587 592
588 if (!test_bit(idx, cpuc->active_mask))
589 continue;
590
591 if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx)) 593 if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
592 continue; 594 continue;
593 595
@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
598 continue; 600 continue;
599 601
600 if (perf_event_overflow(event, &data, regs)) 602 if (perf_event_overflow(event, &data, regs))
601 armpmu->disable(hwc, idx); 603 cpu_pmu->disable(hwc, idx);
602 } 604 }
603 605
604 irq_work_run(); 606 irq_work_run();
@@ -616,6 +618,7 @@ static void
616xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) 618xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
617{ 619{
618 unsigned long flags, ien, evtsel; 620 unsigned long flags, ien, evtsel;
621 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
619 622
620 ien = xscale2pmu_read_int_enable(); 623 ien = xscale2pmu_read_int_enable();
621 evtsel = xscale2pmu_read_event_select(); 624 evtsel = xscale2pmu_read_event_select();
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
649 return; 652 return;
650 } 653 }
651 654
652 raw_spin_lock_irqsave(&pmu_lock, flags); 655 raw_spin_lock_irqsave(&events->pmu_lock, flags);
653 xscale2pmu_write_event_select(evtsel); 656 xscale2pmu_write_event_select(evtsel);
654 xscale2pmu_write_int_enable(ien); 657 xscale2pmu_write_int_enable(ien);
655 raw_spin_unlock_irqrestore(&pmu_lock, flags); 658 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
656} 659}
657 660
658static void 661static void
659xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) 662xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
660{ 663{
661 unsigned long flags, ien, evtsel; 664 unsigned long flags, ien, evtsel;
665 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
662 666
663 ien = xscale2pmu_read_int_enable(); 667 ien = xscale2pmu_read_int_enable();
664 evtsel = xscale2pmu_read_event_select(); 668 evtsel = xscale2pmu_read_event_select();
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
692 return; 696 return;
693 } 697 }
694 698
695 raw_spin_lock_irqsave(&pmu_lock, flags); 699 raw_spin_lock_irqsave(&events->pmu_lock, flags);
696 xscale2pmu_write_event_select(evtsel); 700 xscale2pmu_write_event_select(evtsel);
697 xscale2pmu_write_int_enable(ien); 701 xscale2pmu_write_int_enable(ien);
698 raw_spin_unlock_irqrestore(&pmu_lock, flags); 702 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
699} 703}
700 704
701static int 705static int
702xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc, 706xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
703 struct hw_perf_event *event) 707 struct hw_perf_event *event)
704{ 708{
705 int idx = xscale1pmu_get_event_idx(cpuc, event); 709 int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -718,24 +722,26 @@ static void
718xscale2pmu_start(void) 722xscale2pmu_start(void)
719{ 723{
720 unsigned long flags, val; 724 unsigned long flags, val;
725 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
721 726
722 raw_spin_lock_irqsave(&pmu_lock, flags); 727 raw_spin_lock_irqsave(&events->pmu_lock, flags);
723 val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; 728 val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
724 val |= XSCALE_PMU_ENABLE; 729 val |= XSCALE_PMU_ENABLE;
725 xscale2pmu_write_pmnc(val); 730 xscale2pmu_write_pmnc(val);
726 raw_spin_unlock_irqrestore(&pmu_lock, flags); 731 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
727} 732}
728 733
729static void 734static void
730xscale2pmu_stop(void) 735xscale2pmu_stop(void)
731{ 736{
732 unsigned long flags, val; 737 unsigned long flags, val;
738 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
733 739
734 raw_spin_lock_irqsave(&pmu_lock, flags); 740 raw_spin_lock_irqsave(&events->pmu_lock, flags);
735 val = xscale2pmu_read_pmnc(); 741 val = xscale2pmu_read_pmnc();
736 val &= ~XSCALE_PMU_ENABLE; 742 val &= ~XSCALE_PMU_ENABLE;
737 xscale2pmu_write_pmnc(val); 743 xscale2pmu_write_pmnc(val);
738 raw_spin_unlock_irqrestore(&pmu_lock, flags); 744 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
739} 745}
740 746
741static inline u32 747static inline u32
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
786 } 792 }
787} 793}
788 794
789static const struct arm_pmu xscale2pmu = { 795static struct arm_pmu xscale2pmu = {
790 .id = ARM_PERF_PMU_ID_XSCALE2, 796 .id = ARM_PERF_PMU_ID_XSCALE2,
791 .name = "xscale2", 797 .name = "xscale2",
792 .handle_irq = xscale2pmu_handle_irq, 798 .handle_irq = xscale2pmu_handle_irq,
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
797 .get_event_idx = xscale2pmu_get_event_idx, 803 .get_event_idx = xscale2pmu_get_event_idx,
798 .start = xscale2pmu_start, 804 .start = xscale2pmu_start,
799 .stop = xscale2pmu_stop, 805 .stop = xscale2pmu_stop,
800 .cache_map = &xscale_perf_cache_map, 806 .map_event = xscale_map_event,
801 .event_map = &xscale_perf_map,
802 .raw_event_mask = 0xFF,
803 .num_events = 5, 807 .num_events = 5,
804 .max_period = (1LLU << 32) - 1, 808 .max_period = (1LLU << 32) - 1,
805}; 809};
806 810
807static const struct arm_pmu *__init xscale2pmu_init(void) 811static struct arm_pmu *__init xscale2pmu_init(void)
808{ 812{
809 return &xscale2pmu; 813 return &xscale2pmu;
810} 814}
811#else 815#else
812static const struct arm_pmu *__init xscale1pmu_init(void) 816static struct arm_pmu *__init xscale1pmu_init(void)
813{ 817{
814 return NULL; 818 return NULL;
815} 819}
816 820
817static const struct arm_pmu *__init xscale2pmu_init(void) 821static struct arm_pmu *__init xscale2pmu_init(void)
818{ 822{
819 return NULL; 823 return NULL;
820} 824}
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2b70709376c3..2c3407ee8576 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -10,192 +10,26 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) "PMU: " fmt
14
15#include <linux/cpumask.h>
16#include <linux/err.h> 13#include <linux/err.h>
17#include <linux/interrupt.h>
18#include <linux/kernel.h> 14#include <linux/kernel.h>
19#include <linux/module.h> 15#include <linux/module.h>
20#include <linux/of_device.h>
21#include <linux/platform_device.h>
22 16
23#include <asm/pmu.h> 17#include <asm/pmu.h>
24 18
25static volatile long pmu_lock; 19/*
26 20 * PMU locking to ensure mutual exclusion between different subsystems.
27static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; 21 */
28 22static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
29static int __devinit pmu_register(struct platform_device *pdev,
30 enum arm_pmu_type type)
31{
32 if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
33 pr_warning("received registration request for unknown "
34 "device %d\n", type);
35 return -EINVAL;
36 }
37
38 if (pmu_devices[type]) {
39 pr_warning("rejecting duplicate registration of PMU device "
40 "type %d.", type);
41 return -ENOSPC;
42 }
43
44 pr_info("registered new PMU device of type %d\n", type);
45 pmu_devices[type] = pdev;
46 return 0;
47}
48
49#define OF_MATCH_PMU(_name, _type) { \
50 .compatible = _name, \
51 .data = (void *)_type, \
52}
53
54#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
55
56static struct of_device_id armpmu_of_device_ids[] = {
57 OF_MATCH_CPU("arm,cortex-a9-pmu"),
58 OF_MATCH_CPU("arm,cortex-a8-pmu"),
59 OF_MATCH_CPU("arm,arm1136-pmu"),
60 OF_MATCH_CPU("arm,arm1176-pmu"),
61 {},
62};
63
64#define PLAT_MATCH_PMU(_name, _type) { \
65 .name = _name, \
66 .driver_data = _type, \
67}
68
69#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
70
71static struct platform_device_id armpmu_plat_device_ids[] = {
72 PLAT_MATCH_CPU("arm-pmu"),
73 {},
74};
75
76enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
77{
78 const struct of_device_id *of_id;
79 const struct platform_device_id *pdev_id;
80
81 /* provided by of_device_id table */
82 if (pdev->dev.of_node) {
83 of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
84 BUG_ON(!of_id);
85 return (enum arm_pmu_type)of_id->data;
86 }
87
88 /* Provided by platform_device_id table */
89 pdev_id = platform_get_device_id(pdev);
90 BUG_ON(!pdev_id);
91 return pdev_id->driver_data;
92}
93
94static int __devinit armpmu_device_probe(struct platform_device *pdev)
95{
96 return pmu_register(pdev, armpmu_device_type(pdev));
97}
98
99static struct platform_driver armpmu_driver = {
100 .driver = {
101 .name = "arm-pmu",
102 .of_match_table = armpmu_of_device_ids,
103 },
104 .probe = armpmu_device_probe,
105 .id_table = armpmu_plat_device_ids,
106};
107
108static int __init register_pmu_driver(void)
109{
110 return platform_driver_register(&armpmu_driver);
111}
112device_initcall(register_pmu_driver);
113
114struct platform_device *
115reserve_pmu(enum arm_pmu_type device)
116{
117 struct platform_device *pdev;
118
119 if (test_and_set_bit_lock(device, &pmu_lock)) {
120 pdev = ERR_PTR(-EBUSY);
121 } else if (pmu_devices[device] == NULL) {
122 clear_bit_unlock(device, &pmu_lock);
123 pdev = ERR_PTR(-ENODEV);
124 } else {
125 pdev = pmu_devices[device];
126 }
127
128 return pdev;
129}
130EXPORT_SYMBOL_GPL(reserve_pmu);
131 23
132int 24int
133release_pmu(enum arm_pmu_type device) 25reserve_pmu(enum arm_pmu_type type)
134{
135 if (WARN_ON(!pmu_devices[device]))
136 return -EINVAL;
137 clear_bit_unlock(device, &pmu_lock);
138 return 0;
139}
140EXPORT_SYMBOL_GPL(release_pmu);
141
142static int
143set_irq_affinity(int irq,
144 unsigned int cpu)
145{ 26{
146#ifdef CONFIG_SMP 27 return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
147 int err = irq_set_affinity(irq, cpumask_of(cpu));
148 if (err)
149 pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
150 irq, cpu);
151 return err;
152#else
153 return -EINVAL;
154#endif
155}
156
157static int
158init_cpu_pmu(void)
159{
160 int i, irqs, err = 0;
161 struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
162
163 if (!pdev)
164 return -ENODEV;
165
166 irqs = pdev->num_resources;
167
168 /*
169 * If we have a single PMU interrupt that we can't shift, assume that
170 * we're running on a uniprocessor machine and continue.
171 */
172 if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
173 return 0;
174
175 for (i = 0; i < irqs; ++i) {
176 err = set_irq_affinity(platform_get_irq(pdev, i), i);
177 if (err)
178 break;
179 }
180
181 return err;
182} 28}
29EXPORT_SYMBOL_GPL(reserve_pmu);
183 30
184int 31void
185init_pmu(enum arm_pmu_type device) 32release_pmu(enum arm_pmu_type type)
186{ 33{
187 int err = 0; 34 clear_bit_unlock(type, pmu_lock);
188
189 switch (device) {
190 case ARM_PMU_DEVICE_CPU:
191 err = init_cpu_pmu();
192 break;
193 default:
194 pr_warning("attempt to initialise unknown device %d\n",
195 device);
196 err = -EINVAL;
197 }
198
199 return err;
200} 35}
201EXPORT_SYMBOL_GPL(init_pmu);
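The net effect of this rewrite is that pmu.c no longer tracks platform devices at all: reserve_pmu() is now a pure bitmap lock (one bit per arm_pmu_type) and release_pmu() simply drops the bit, with the device bookkeeping moved elsewhere. A minimal caller-side sketch, assuming only the two functions shown in the diff (the surrounding error handling is illustrative):

#include <linux/err.h>
#include <asm/pmu.h>

static int pmu_claim_example(void)
{
	int err;

	/* Atomically claim the CPU PMU bit; -EBUSY if another user holds it. */
	err = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (err)
		return err;

	/* ... program and use the PMU counters ... */

	release_pmu(ARM_PMU_DEVICE_CPU);	/* now returns void */
	return 0;
}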
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 9cf4cbf8f95b..d0cdedf4864d 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -57,7 +57,8 @@ relocate_new_kernel:
57 mov r0,#0 57 mov r0,#0
58 ldr r1,kexec_mach_type 58 ldr r1,kexec_mach_type
59 ldr r2,kexec_boot_atags 59 ldr r2,kexec_boot_atags
60 mov pc,lr 60 ARM( mov pc, lr )
61 THUMB( bx lr )
61 62
62 .align 63 .align
63 64
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 78d197d6ec34..6136144f8f8d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -280,18 +280,19 @@ static void __init cacheid_init(void)
280 if (arch >= CPU_ARCH_ARMv6) { 280 if (arch >= CPU_ARCH_ARMv6) {
281 if ((cachetype & (7 << 29)) == 4 << 29) { 281 if ((cachetype & (7 << 29)) == 4 << 29) {
282 /* ARMv7 register format */ 282 /* ARMv7 register format */
283 arch = CPU_ARCH_ARMv7;
283 cacheid = CACHEID_VIPT_NONALIASING; 284 cacheid = CACHEID_VIPT_NONALIASING;
284 if ((cachetype & (3 << 14)) == 1 << 14) 285 if ((cachetype & (3 << 14)) == 1 << 14)
285 cacheid |= CACHEID_ASID_TAGGED; 286 cacheid |= CACHEID_ASID_TAGGED;
286 else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
287 cacheid |= CACHEID_VIPT_I_ALIASING;
288 } else if (cachetype & (1 << 23)) {
289 cacheid = CACHEID_VIPT_ALIASING;
290 } else { 287 } else {
291 cacheid = CACHEID_VIPT_NONALIASING; 288 arch = CPU_ARCH_ARMv6;
292 if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6)) 289 if (cachetype & (1 << 23))
293 cacheid |= CACHEID_VIPT_I_ALIASING; 290 cacheid = CACHEID_VIPT_ALIASING;
291 else
292 cacheid = CACHEID_VIPT_NONALIASING;
294 } 293 }
294 if (cpu_has_aliasing_icache(arch))
295 cacheid |= CACHEID_VIPT_I_ALIASING;
295 } else { 296 } else {
296 cacheid = CACHEID_VIVT; 297 cacheid = CACHEID_VIVT;
297 } 298 }
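The restructured cacheid_init() decides the cache type register format first, records it in "arch", and then performs the aliasing I-cache test once for both paths instead of duplicating it per branch. The decode it relies on, as a simplified sketch (the field positions are the architectural ones; the helper itself is illustrative):

static void cacheid_decode_sketch(unsigned int cachetype)
{
	int arch;

	if ((cachetype & (7 << 29)) == (4 << 29)) {
		/* CTR[31:29] == 0b100: ARMv7 register format */
		arch = CPU_ARCH_ARMv7;
		/* CTR[15:14] == 0b01: ASID-tagged VIVT I-cache */
	} else {
		arch = CPU_ARCH_ARMv6;
		/* CTR bit 23 set: aliasing VIPT D-cache on ARMv6 */
	}
	/* cpu_has_aliasing_icache(arch) now runs once, with the right arch. */
}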
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2c277d40cee6..01c186222f3b 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -137,8 +137,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
137 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk); 137 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
138 clk->min_delta_ns = clockevent_delta2ns(0xf, clk); 138 clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
139 139
140 clockevents_register_device(clk);
141
140 /* Make sure our local interrupt controller has this enabled */ 142 /* Make sure our local interrupt controller has this enabled */
141 gic_enable_ppi(clk->irq); 143 gic_enable_ppi(clk->irq);
142
143 clockevents_register_device(clk);
144} 144}
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index d522b47e30b5..6c8e3b5f669f 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -157,7 +157,7 @@ static struct clk_lookup periph_clocks_lookups[] = {
157 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), 157 CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
158 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), 158 CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
159 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), 159 CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
160 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc1_clk), 160 CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
161 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), 161 CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
162 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), 162 CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
163 CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk), 163 CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
diff --git a/arch/arm/mach-ep93xx/include/mach/ts72xx.h b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
index 0eabec62cd9d..f1397a13e76b 100644
--- a/arch/arm/mach-ep93xx/include/mach/ts72xx.h
+++ b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
@@ -6,7 +6,7 @@
6 * TS72xx memory map: 6 * TS72xx memory map:
7 * 7 *
8 * virt phys size 8 * virt phys size
9 * febff000 22000000 4K model number register 9 * febff000 22000000 4K model number register (bits 0-2)
10 * febfe000 22400000 4K options register 10 * febfe000 22400000 4K options register
11 * febfd000 22800000 4K options register #2 11 * febfd000 22800000 4K options register #2
12 * febf9000 10800000 4K TS-5620 RTC index register 12 * febf9000 10800000 4K TS-5620 RTC index register
@@ -20,6 +20,9 @@
20#define TS72XX_MODEL_TS7200 0x00 20#define TS72XX_MODEL_TS7200 0x00
21#define TS72XX_MODEL_TS7250 0x01 21#define TS72XX_MODEL_TS7250 0x01
22#define TS72XX_MODEL_TS7260 0x02 22#define TS72XX_MODEL_TS7260 0x02
23#define TS72XX_MODEL_TS7300 0x03
24#define TS72XX_MODEL_TS7400 0x04
25#define TS72XX_MODEL_MASK 0x07
23 26
24 27
25#define TS72XX_OPTIONS_PHYS_BASE 0x22400000 28#define TS72XX_OPTIONS_PHYS_BASE 0x22400000
@@ -51,19 +54,34 @@
51 54
52#ifndef __ASSEMBLY__ 55#ifndef __ASSEMBLY__
53 56
57static inline int ts72xx_model(void)
58{
59 return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK;
60}
61
54static inline int board_is_ts7200(void) 62static inline int board_is_ts7200(void)
55{ 63{
56 return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7200; 64 return ts72xx_model() == TS72XX_MODEL_TS7200;
57} 65}
58 66
59static inline int board_is_ts7250(void) 67static inline int board_is_ts7250(void)
60{ 68{
61 return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7250; 69 return ts72xx_model() == TS72XX_MODEL_TS7250;
62} 70}
63 71
64static inline int board_is_ts7260(void) 72static inline int board_is_ts7260(void)
65{ 73{
66 return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7260; 74 return ts72xx_model() == TS72XX_MODEL_TS7260;
75}
76
77static inline int board_is_ts7300(void)
78{
79 return ts72xx_model() == TS72XX_MODEL_TS7300;
80}
81
82static inline int board_is_ts7400(void)
83{
84 return ts72xx_model() == TS72XX_MODEL_TS7400;
67} 85}
68 86
69static inline int is_max197_installed(void) 87static inline int is_max197_installed(void)
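With the TS-7300 and TS-7400 IDs added, every model check funnels through ts72xx_model(), which masks the register down to bits 0-2 per the updated memory-map comment (the upper bits are taken to carry unrelated information, an assumption consistent with the new TS72XX_MODEL_MASK). A hypothetical probe using the new helpers:

static int __init ts72xx_report_model(void)
{
	if (board_is_ts7300() || board_is_ts7400())
		pr_info("ts72xx: model %d\n", ts72xx_model());
	return 0;
}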
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c
index 851dea018578..1561b036a9bf 100644
--- a/arch/arm/mach-exynos4/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -520,7 +520,7 @@ static struct clk init_clocks_off[] = {
520 .ctrlbit = (1 << 21), 520 .ctrlbit = (1 << 21),
521 }, { 521 }, {
522 .name = "ac97", 522 .name = "ac97",
523 .id = -1, 523 .devname = "samsung-ac97",
524 .enable = exynos4_clk_ip_peril_ctrl, 524 .enable = exynos4_clk_ip_peril_ctrl,
525 .ctrlbit = (1 << 27), 525 .ctrlbit = (1 << 27),
526 }, { 526 }, {
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 2d8a40c9e6e5..746d6fc6d397 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -24,12 +24,13 @@
24#include <plat/exynos4.h> 24#include <plat/exynos4.h>
25#include <plat/adc-core.h> 25#include <plat/adc-core.h>
26#include <plat/sdhci.h> 26#include <plat/sdhci.h>
27#include <plat/devs.h>
28#include <plat/fb-core.h> 27#include <plat/fb-core.h>
29#include <plat/fimc-core.h> 28#include <plat/fimc-core.h>
30#include <plat/iic-core.h> 29#include <plat/iic-core.h>
30#include <plat/reset.h>
31 31
32#include <mach/regs-irq.h> 32#include <mach/regs-irq.h>
33#include <mach/regs-pmu.h>
33 34
34extern int combiner_init(unsigned int combiner_nr, void __iomem *base, 35extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
35 unsigned int irq_start); 36 unsigned int irq_start);
@@ -128,6 +129,11 @@ static void exynos4_idle(void)
128 local_irq_enable(); 129 local_irq_enable();
129} 130}
130 131
132static void exynos4_sw_reset(void)
133{
134 __raw_writel(0x1, S5P_SWRESET);
135}
136
131/* 137/*
132 * exynos4_map_io 138 * exynos4_map_io
133 * 139 *
@@ -241,5 +247,8 @@ int __init exynos4_init(void)
241 /* set idle function */ 247 /* set idle function */
242 pm_idle = exynos4_idle; 248 pm_idle = exynos4_idle;
243 249
250 /* set sw_reset function */
251 s5p_reset_hook = exynos4_sw_reset;
252
244 return sysdev_register(&exynos4_sysdev); 253 return sysdev_register(&exynos4_sysdev);
245} 254}
diff --git a/arch/arm/mach-exynos4/include/mach/irqs.h b/arch/arm/mach-exynos4/include/mach/irqs.h
index 934d2a493982..f8952f8f3757 100644
--- a/arch/arm/mach-exynos4/include/mach/irqs.h
+++ b/arch/arm/mach-exynos4/include/mach/irqs.h
@@ -80,9 +80,8 @@
80#define IRQ_HSMMC3 IRQ_SPI(76) 80#define IRQ_HSMMC3 IRQ_SPI(76)
81#define IRQ_DWMCI IRQ_SPI(77) 81#define IRQ_DWMCI IRQ_SPI(77)
82 82
83#define IRQ_MIPICSI0 IRQ_SPI(78) 83#define IRQ_MIPI_CSIS0 IRQ_SPI(78)
84 84#define IRQ_MIPI_CSIS1 IRQ_SPI(80)
85#define IRQ_MIPICSI1 IRQ_SPI(80)
86 85
87#define IRQ_ONENAND_AUDI IRQ_SPI(82) 86#define IRQ_ONENAND_AUDI IRQ_SPI(82)
88#define IRQ_ROTATOR IRQ_SPI(83) 87#define IRQ_ROTATOR IRQ_SPI(83)
diff --git a/arch/arm/mach-exynos4/include/mach/regs-pmu.h b/arch/arm/mach-exynos4/include/mach/regs-pmu.h
index fa49bbb8e7b0..cdf9b47c303c 100644
--- a/arch/arm/mach-exynos4/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-pmu.h
@@ -29,6 +29,8 @@
29#define S5P_USE_STANDBY_WFE1 (1 << 25) 29#define S5P_USE_STANDBY_WFE1 (1 << 25)
30#define S5P_USE_MASK ((0x3 << 16) | (0x3 << 24)) 30#define S5P_USE_MASK ((0x3 << 16) | (0x3 << 24))
31 31
32#define S5P_SWRESET S5P_PMUREG(0x0400)
33
32#define S5P_WAKEUP_STAT S5P_PMUREG(0x0600) 34#define S5P_WAKEUP_STAT S5P_PMUREG(0x0600)
33#define S5P_EINT_WAKEUP_MASK S5P_PMUREG(0x0604) 35#define S5P_EINT_WAKEUP_MASK S5P_PMUREG(0x0604)
34#define S5P_WAKEUP_MASK S5P_PMUREG(0x0608) 36#define S5P_WAKEUP_MASK S5P_PMUREG(0x0608)
diff --git a/arch/arm/mach-exynos4/irq-eint.c b/arch/arm/mach-exynos4/irq-eint.c
index 9d87d2ac7f68..badb8c66fc9b 100644
--- a/arch/arm/mach-exynos4/irq-eint.c
+++ b/arch/arm/mach-exynos4/irq-eint.c
@@ -23,6 +23,8 @@
23 23
24#include <mach/regs-gpio.h> 24#include <mach/regs-gpio.h>
25 25
26#include <asm/mach/irq.h>
27
26static DEFINE_SPINLOCK(eint_lock); 28static DEFINE_SPINLOCK(eint_lock);
27 29
28static unsigned int eint0_15_data[16]; 30static unsigned int eint0_15_data[16];
@@ -184,8 +186,11 @@ static inline void exynos4_irq_demux_eint(unsigned int start)
184 186
185static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) 187static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
186{ 188{
189 struct irq_chip *chip = irq_get_chip(irq);
190 chained_irq_enter(chip, desc);
187 exynos4_irq_demux_eint(IRQ_EINT(16)); 191 exynos4_irq_demux_eint(IRQ_EINT(16));
188 exynos4_irq_demux_eint(IRQ_EINT(24)); 192 exynos4_irq_demux_eint(IRQ_EINT(24));
193 chained_irq_exit(chip, desc);
189} 194}
190 195
191static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc) 196static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
@@ -193,6 +198,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
193 u32 *irq_data = irq_get_handler_data(irq); 198 u32 *irq_data = irq_get_handler_data(irq);
194 struct irq_chip *chip = irq_get_chip(irq); 199 struct irq_chip *chip = irq_get_chip(irq);
195 200
201 chained_irq_enter(chip, desc);
196 chip->irq_mask(&desc->irq_data); 202 chip->irq_mask(&desc->irq_data);
197 203
198 if (chip->irq_ack) 204 if (chip->irq_ack)
@@ -201,6 +207,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
201 generic_handle_irq(*irq_data); 207 generic_handle_irq(*irq_data);
202 208
203 chip->irq_unmask(&desc->irq_data); 209 chip->irq_unmask(&desc->irq_data);
210 chained_irq_exit(chip, desc);
204} 211}
205 212
206int __init exynos4_init_irq_eint(void) 213int __init exynos4_init_irq_eint(void)
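The chained_irq_enter()/chained_irq_exit() bracketing added here (and, further down, in plat-s5p/irq-gpioint.c and plat-samsung/irq-vic-timer.c) makes the demux handlers ack and eoi the cascade interrupt through the parent chip, which chips like the GIC require. The generic shape of such a handler, as a sketch with a placeholder child IRQ:

#include <asm/mach/irq.h>

static void demux_sketch(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);	/* mask + ack the cascade IRQ */
	generic_handle_irq(CHILD_IRQ);	/* CHILD_IRQ is hypothetical */
	chained_irq_exit(chip, desc);	/* unmask / eoi the cascade IRQ */
}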
diff --git a/arch/arm/mach-exynos4/mach-universal_c210.c b/arch/arm/mach-exynos4/mach-universal_c210.c
index d7ec84d586f2..2aac6f755c8e 100644
--- a/arch/arm/mach-exynos4/mach-universal_c210.c
+++ b/arch/arm/mach-exynos4/mach-universal_c210.c
@@ -79,7 +79,7 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
79}; 79};
80 80
81static struct regulator_consumer_supply max8952_consumer = 81static struct regulator_consumer_supply max8952_consumer =
82 REGULATOR_SUPPLY("vddarm", NULL); 82 REGULATOR_SUPPLY("vdd_arm", NULL);
83 83
84static struct max8952_platform_data universal_max8952_pdata __initdata = { 84static struct max8952_platform_data universal_max8952_pdata __initdata = {
85 .gpio_vid0 = EXYNOS4_GPX0(3), 85 .gpio_vid0 = EXYNOS4_GPX0(3),
@@ -105,7 +105,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
105}; 105};
106 106
107static struct regulator_consumer_supply lp3974_buck1_consumer = 107static struct regulator_consumer_supply lp3974_buck1_consumer =
108 REGULATOR_SUPPLY("vddint", NULL); 108 REGULATOR_SUPPLY("vdd_int", NULL);
109 109
110static struct regulator_consumer_supply lp3974_buck2_consumer = 110static struct regulator_consumer_supply lp3974_buck2_consumer =
111 REGULATOR_SUPPLY("vddg3d", NULL); 111 REGULATOR_SUPPLY("vddg3d", NULL);
diff --git a/arch/arm/mach-exynos4/setup-usb-phy.c b/arch/arm/mach-exynos4/setup-usb-phy.c
index 0883c1b824b9..39aca045f660 100644
--- a/arch/arm/mach-exynos4/setup-usb-phy.c
+++ b/arch/arm/mach-exynos4/setup-usb-phy.c
@@ -82,7 +82,7 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
82 82
83 rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK); 83 rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK);
84 writel(rstcon, EXYNOS4_RSTCON); 84 writel(rstcon, EXYNOS4_RSTCON);
85 udelay(50); 85 udelay(80);
86 86
87 clk_disable(otg_clk); 87 clk_disable(otg_clk);
88 clk_put(otg_clk); 88 clk_put(otg_clk);
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index dc26fff22cf0..c8e7afcf14ec 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -62,6 +62,7 @@ config ARCH_EBSA285_HOST
62config ARCH_NETWINDER 62config ARCH_NETWINDER
63 bool "NetWinder" 63 bool "NetWinder"
64 select CLKSRC_I8253 64 select CLKSRC_I8253
65 select CLKEVT_I8253
65 select FOOTBRIDGE_HOST 66 select FOOTBRIDGE_HOST
66 select ISA 67 select ISA
67 select ISA_DMA 68 select ISA_DMA
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 1331fff51ae2..18c32a5541d9 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -18,6 +18,7 @@
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <video/vga.h>
21 22
22#include <asm/irq.h> 23#include <asm/irq.h>
23#include <asm/system.h> 24#include <asm/system.h>
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 881add0fbe5b..b1ec2cf53bb0 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -310,7 +310,7 @@ static struct sys_timer eukrea_cpuimx27_timer = {
310 .init = eukrea_cpuimx27_timer_init, 310 .init = eukrea_cpuimx27_timer_init,
311}; 311};
312 312
313MACHINE_START(CPUIMX27, "EUKREA CPUIMX27") 313MACHINE_START(EUKREA_CPUIMX27, "EUKREA CPUIMX27")
314 .atag_offset = 0x100, 314 .atag_offset = 0x100,
315 .map_io = mx27_map_io, 315 .map_io = mx27_map_io,
316 .init_early = imx27_init_early, 316 .init_early = imx27_init_early,
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 10b89139da48..470b654b0e6e 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -192,7 +192,7 @@ struct sys_timer eukrea_cpuimx35_timer = {
192 .init = eukrea_cpuimx35_timer_init, 192 .init = eukrea_cpuimx35_timer_init,
193}; 193};
194 194
195MACHINE_START(EUKREA_CPUIMX35, "Eukrea CPUIMX35") 195MACHINE_START(EUKREA_CPUIMX35SD, "Eukrea CPUIMX35")
196 /* Maintainer: Eukrea Electromatique */ 196 /* Maintainer: Eukrea Electromatique */
197 .atag_offset = 0x100, 197 .atag_offset = 0x100,
198 .map_io = mx35_map_io, 198 .map_io = mx35_map_io,
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index d8699b54268d..9163318e95a2 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -161,7 +161,7 @@ static struct sys_timer eukrea_cpuimx25_timer = {
161 .init = eukrea_cpuimx25_timer_init, 161 .init = eukrea_cpuimx25_timer_init,
162}; 162};
163 163
164MACHINE_START(EUKREA_CPUIMX25, "Eukrea CPUIMX25") 164MACHINE_START(EUKREA_CPUIMX25SD, "Eukrea CPUIMX25")
165 /* Maintainer: Eukrea Electromatique */ 165 /* Maintainer: Eukrea Electromatique */
166 .atag_offset = 0x100, 166 .atag_offset = 0x100,
167 .map_io = mx25_map_io, 167 .map_io = mx25_map_io,
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index e25f60f5db8f..9e5c1663fc4f 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -77,7 +77,7 @@ static int __init dns323_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
77 /* 77 /*
78 * Check for devices with hard-wired IRQs. 78 * Check for devices with hard-wired IRQs.
79 */ 79 */
80 irq = orion5x_pci_map_irq(const dev, slot, pin); 80 irq = orion5x_pci_map_irq(dev, slot, pin);
81 if (irq != -1) 81 if (irq != -1)
82 return irq; 82 return irq;
83 83
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index 28b8760ab9fa..bc4a920e26ee 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -14,6 +14,7 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/mbus.h> 16#include <linux/mbus.h>
17#include <video/vga.h>
17#include <asm/irq.h> 18#include <asm/irq.h>
18#include <asm/mach/pci.h> 19#include <asm/mach/pci.h>
19#include <plat/pcie.h> 20#include <plat/pcie.h>
diff --git a/arch/arm/mach-realview/include/mach/system.h b/arch/arm/mach-realview/include/mach/system.h
index a30f2e3ec178..6657ff231161 100644
--- a/arch/arm/mach-realview/include/mach/system.h
+++ b/arch/arm/mach-realview/include/mach/system.h
@@ -44,6 +44,7 @@ static inline void arch_reset(char mode, const char *cmd)
44 */ 44 */
45 if (realview_reset) 45 if (realview_reset)
46 realview_reset(mode); 46 realview_reset(mode);
47 dsb();
47} 48}
48 49
49#endif 50#endif
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 8bad64370689..055e2858b0dd 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -16,6 +16,7 @@
16#include <linux/suspend.h> 16#include <linux/suspend.h>
17#include <linux/serial_core.h> 17#include <linux/serial_core.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/gpio.h>
19 20
20#include <mach/map.h> 21#include <mach/map.h>
21#include <mach/irqs.h> 22#include <mach/irqs.h>
diff --git a/arch/arm/mach-s5p64x0/irq-eint.c b/arch/arm/mach-s5p64x0/irq-eint.c
index 69ed4545112b..fe7380f5c3cd 100644
--- a/arch/arm/mach-s5p64x0/irq-eint.c
+++ b/arch/arm/mach-s5p64x0/irq-eint.c
@@ -129,7 +129,7 @@ static int s5p64x0_alloc_gc(void)
129 } 129 }
130 130
131 ct = gc->chip_types; 131 ct = gc->chip_types;
132 ct->chip.irq_ack = irq_gc_ack; 132 ct->chip.irq_ack = irq_gc_ack_set_bit;
133 ct->chip.irq_mask = irq_gc_mask_set_bit; 133 ct->chip.irq_mask = irq_gc_mask_set_bit;
134 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 134 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
135 ct->chip.irq_set_type = s5p64x0_irq_eint_set_type; 135 ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index 309e388a8a83..f149d278377b 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
88 SAVE_ITEM(S3C2410_TCNTO(0)), 88 SAVE_ITEM(S3C2410_TCNTO(0)),
89}; 89};
90 90
91void s5pv210_cpu_suspend(unsigned long arg) 91static int s5pv210_cpu_suspend(unsigned long arg)
92{ 92{
93 unsigned long tmp; 93 unsigned long tmp;
94 94
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 167a67c5ca54..5fde49da399a 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -342,6 +342,7 @@ static struct platform_device mipidsi0_device = {
342static struct sh_mobile_sdhi_info sdhi0_info = { 342static struct sh_mobile_sdhi_info sdhi0_info = {
343 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 343 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
344 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 344 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
345 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
345 .tmio_caps = MMC_CAP_SD_HIGHSPEED, 346 .tmio_caps = MMC_CAP_SD_HIGHSPEED,
346 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, 347 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
347}; 348};
@@ -383,7 +384,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
383} 384}
384 385
385static struct sh_mobile_sdhi_info sh_sdhi1_info = { 386static struct sh_mobile_sdhi_info sh_sdhi1_info = {
386 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, 387 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
387 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, 388 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
388 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 389 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
389 .set_pwr = ag5evm_sdhi1_set_pwr, 390 .set_pwr = ag5evm_sdhi1_set_pwr,
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index e7c5379069ef..de2253d7f157 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -642,6 +642,8 @@ static struct usbhs_private usbhs0_private = {
642 }, 642 },
643 .driver_param = { 643 .driver_param = {
644 .buswait_bwait = 4, 644 .buswait_bwait = 4,
645 .d0_tx_id = SHDMA_SLAVE_USB0_TX,
646 .d1_rx_id = SHDMA_SLAVE_USB0_RX,
645 }, 647 },
646 }, 648 },
647}; 649};
@@ -811,6 +813,8 @@ static struct usbhs_private usbhs1_private = {
811 .buswait_bwait = 4, 813 .buswait_bwait = 4,
812 .pipe_type = usbhs1_pipe_cfg, 814 .pipe_type = usbhs1_pipe_cfg,
813 .pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg), 815 .pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg),
816 .d0_tx_id = SHDMA_SLAVE_USB1_TX,
817 .d1_rx_id = SHDMA_SLAVE_USB1_RX,
814 }, 818 },
815 }, 819 },
816}; 820};
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index e6e11e4e2d43..66975921e646 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -503,16 +503,17 @@ static struct clk *late_main_clks[] = {
503 &sh7372_fsidivb_clk, 503 &sh7372_fsidivb_clk,
504}; 504};
505 505
506enum { MSTP001, 506enum { MSTP001, MSTP000,
507 MSTP131, MSTP130, 507 MSTP131, MSTP130,
508 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, 508 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
509 MSTP118, MSTP117, MSTP116, MSTP113, 509 MSTP118, MSTP117, MSTP116, MSTP113,
510 MSTP106, MSTP101, MSTP100, 510 MSTP106, MSTP101, MSTP100,
511 MSTP223, 511 MSTP223,
512 MSTP218, MSTP217, MSTP216, 512 MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207,
513 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 513 MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
514 MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, 514 MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
515 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403, 515 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406,
516 MSTP405, MSTP404, MSTP403, MSTP400,
516 MSTP_NR }; 517 MSTP_NR };
517 518
518#define MSTP(_parent, _reg, _bit, _flags) \ 519#define MSTP(_parent, _reg, _bit, _flags) \
@@ -520,6 +521,7 @@ enum { MSTP001,
520 521
521static struct clk mstp_clks[MSTP_NR] = { 522static struct clk mstp_clks[MSTP_NR] = {
522 [MSTP001] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 1, 0), /* IIC2 */ 523 [MSTP001] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 1, 0), /* IIC2 */
524 [MSTP000] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 0, 0), /* MSIOF0 */
523 [MSTP131] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 31, 0), /* VEU3 */ 525 [MSTP131] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 31, 0), /* VEU3 */
524 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ 526 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
525 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ 527 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
@@ -538,14 +540,16 @@ static struct clk mstp_clks[MSTP_NR] = {
538 [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ 540 [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
539 [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ 541 [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
540 [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */ 542 [MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
543 [MSTP214] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */
544 [MSTP208] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 8, 0), /* MSIOF1 */
541 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ 545 [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
542 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ 546 [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
547 [MSTP205] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 5, 0), /* MSIOF2 */
543 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ 548 [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
544 [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */ 549 [MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */
545 [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */ 550 [MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */
546 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ 551 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
547 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ 552 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
548 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
549 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */ 553 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
550 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ 554 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
551 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ 555 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
@@ -557,8 +561,12 @@ static struct clk mstp_clks[MSTP_NR] = {
557 [MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */ 561 [MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */
558 [MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */ 562 [MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */
559 [MSTP410] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 10, 0), /* IIC4 */ 563 [MSTP410] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 10, 0), /* IIC4 */
564 [MSTP407] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 7, 0), /* USB-DMAC1 */
560 [MSTP406] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 6, 0), /* USB1 */ 565 [MSTP406] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 6, 0), /* USB1 */
566 [MSTP405] = MSTP(&r_clk, SMSTPCR4, 5, 0), /* CMT4 */
567 [MSTP404] = MSTP(&r_clk, SMSTPCR4, 4, 0), /* CMT3 */
561 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ 568 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
569 [MSTP400] = MSTP(&r_clk, SMSTPCR4, 0, 0), /* CMT2 */
562}; 570};
563 571
564static struct clk_lookup lookups[] = { 572static struct clk_lookup lookups[] = {
@@ -609,6 +617,7 @@ static struct clk_lookup lookups[] = {
609 617
610 /* MSTP32 clocks */ 618 /* MSTP32 clocks */
611 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */ 619 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */
620 CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[MSTP000]), /* MSIOF0 */
612 CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[MSTP131]), /* VEU3 */ 621 CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[MSTP131]), /* VEU3 */
613 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ 622 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
614 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ 623 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
@@ -629,14 +638,16 @@ static struct clk_lookup lookups[] = {
629 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */ 638 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
630 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */ 639 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
631 CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */ 640 CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
641 CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), /* USB-DMAC0 */
642 CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[MSTP208]), /* MSIOF1 */
632 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 643 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
633 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */ 644 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
645 CLKDEV_DEV_ID("spi_sh_msiof.2", &mstp_clks[MSTP205]), /* MSIOF2 */
634 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ 646 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
635 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */ 647 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
636 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */ 648 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
637 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */ 649 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
638 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */ 650 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
639 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
640 CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */ 651 CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
641 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */ 652 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
642 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */ 653 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
@@ -650,10 +661,14 @@ static struct clk_lookup lookups[] = {
650 CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */ 661 CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */
651 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */ 662 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */
652 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */ 663 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */
664 CLKDEV_DEV_ID("sh-dma-engine.4", &mstp_clks[MSTP407]), /* USB-DMAC1 */
653 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */ 665 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
654 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */ 666 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
655 CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */ 667 CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */
668 CLKDEV_DEV_ID("sh_cmt.4", &mstp_clks[MSTP405]), /* CMT4 */
669 CLKDEV_DEV_ID("sh_cmt.3", &mstp_clks[MSTP404]), /* CMT3 */
656 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ 670 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
671 CLKDEV_DEV_ID("sh_cmt.2", &mstp_clks[MSTP400]), /* CMT2 */
657 672
658 CLKDEV_ICK_ID("hdmi", "sh_mobile_lcdc_fb.1", 673 CLKDEV_ICK_ID("hdmi", "sh_mobile_lcdc_fb.1",
659 &div6_reparent_clks[DIV6_HDMI]), 674 &div6_reparent_clks[DIV6_HDMI]),
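Each MSTP() entry added above is a gate clock: its enable bit lives in one of the SMSTPCR module-stop registers, where a set bit stops the module, so enabling the clock means clearing the bit. A sketch of the mechanism (not the shared shmobile implementation itself):

/* Enable an MSTP gate: clear the module-stop bit in its SMSTPCR register. */
static void mstp_enable_sketch(void __iomem *smstpcr, int bit)
{
	__raw_writel(__raw_readl(smstpcr) & ~(1 << bit), smstpcr);
}

The CLKDEV_DEV_ID() lookups then bind each gate to the device name the driver core will ask for, e.g. "sh-dma-engine.3" for USB-DMAC0.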
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 6db2ccabc2bf..61a846bb30f2 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -365,7 +365,7 @@ void __init sh73a0_clock_init(void)
365 __raw_writel(0x108, SD2CKCR); 365 __raw_writel(0x108, SD2CKCR);
366 366
367 /* detect main clock parent */ 367 /* detect main clock parent */
368 switch ((__raw_readl(CKSCR) >> 24) & 0x03) { 368 switch ((__raw_readl(CKSCR) >> 28) & 0x03) {
369 case 0: 369 case 0:
370 main_clk.parent = &sh73a0_extal1_clk; 370 main_clk.parent = &sh73a0_extal1_clk;
371 break; 371 break;
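The sh73a0 fix moves the two-bit main-clock parent selector from CKSCR bits [25:24] to bits [29:28]; only case 0 (EXTAL1) is visible in this hunk, with the remaining selector values picking the other external clock inputs. The corrected read, in isolation:

/* Parent selector is CKSCR[29:28], not [25:24] as previously shifted. */
unsigned int sel = (__raw_readl(CKSCR) >> 28) & 0x03;	/* 0 => EXTAL1 */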
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index ce595cee86cd..24e63a85e669 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -459,6 +459,10 @@ enum {
459 SHDMA_SLAVE_SDHI2_TX, 459 SHDMA_SLAVE_SDHI2_TX,
460 SHDMA_SLAVE_MMCIF_RX, 460 SHDMA_SLAVE_MMCIF_RX,
461 SHDMA_SLAVE_MMCIF_TX, 461 SHDMA_SLAVE_MMCIF_TX,
462 SHDMA_SLAVE_USB0_TX,
463 SHDMA_SLAVE_USB0_RX,
464 SHDMA_SLAVE_USB1_TX,
465 SHDMA_SLAVE_USB1_RX,
462}; 466};
463 467
464extern struct clk sh7372_extal1_clk; 468extern struct clk sh7372_extal1_clk;
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 3b28743c77eb..739315e30eb9 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -379,7 +379,7 @@ enum {
379 /* BBIF2 */ 379 /* BBIF2 */
380 VPU, 380 VPU,
381 TSIF1, 381 TSIF1,
382 _3DG_SGX530, 382 /* 3DG */
383 _2DDMAC, 383 _2DDMAC,
384 IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2, 384 IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
385 IPMMU_IPMMUR, IPMMU_IPMMUR2, 385 IPMMU_IPMMUR, IPMMU_IPMMUR2,
@@ -436,7 +436,7 @@ static struct intc_vect intcs_vectors[] = {
436 /* BBIF2 */ 436 /* BBIF2 */
437 INTCS_VECT(VPU, 0x980), 437 INTCS_VECT(VPU, 0x980),
438 INTCS_VECT(TSIF1, 0x9a0), 438 INTCS_VECT(TSIF1, 0x9a0),
439 INTCS_VECT(_3DG_SGX530, 0x9e0), 439 /* 3DG */
440 INTCS_VECT(_2DDMAC, 0xa00), 440 INTCS_VECT(_2DDMAC, 0xa00),
441 INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0), 441 INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0),
442 INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0), 442 INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0),
@@ -521,7 +521,7 @@ static struct intc_mask_reg intcs_mask_registers[] = {
521 RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } }, 521 RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } },
522 { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */ 522 { 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */
523 { 0, 0, MSIOF, 0, 523 { 0, 0, MSIOF, 0,
524 _3DG_SGX530, 0, 0, 0 } }, 524 0, 0, 0, 0 } },
525 { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */ 525 { 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */
526 { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0, 526 { 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0,
527 0, 0, 0, 0 } }, 527 0, 0, 0, 0 } },
@@ -561,7 +561,6 @@ static struct intc_prio_reg intcs_prio_registers[] = {
561 TMU_TUNI2, TSIF1 } }, 561 TMU_TUNI2, TSIF1 } },
562 { 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } }, 562 { 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } },
563 { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } }, 563 { 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } },
564 { 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, _3DG_SGX530, 0, 0 } },
565 { 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } }, 564 { 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } },
566 { 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } }, 565 { 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } },
567 { 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } }, 566 { 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } },
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 79f0413d8725..2d9b1b1a2538 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -169,35 +169,35 @@ static struct platform_device scif6_device = {
169}; 169};
170 170
171/* CMT */ 171/* CMT */
172static struct sh_timer_config cmt10_platform_data = { 172static struct sh_timer_config cmt2_platform_data = {
173 .name = "CMT10", 173 .name = "CMT2",
174 .channel_offset = 0x10, 174 .channel_offset = 0x40,
175 .timer_bit = 0, 175 .timer_bit = 5,
176 .clockevent_rating = 125, 176 .clockevent_rating = 125,
177 .clocksource_rating = 125, 177 .clocksource_rating = 125,
178}; 178};
179 179
180static struct resource cmt10_resources[] = { 180static struct resource cmt2_resources[] = {
181 [0] = { 181 [0] = {
182 .name = "CMT10", 182 .name = "CMT2",
183 .start = 0xe6138010, 183 .start = 0xe6130040,
184 .end = 0xe613801b, 184 .end = 0xe613004b,
185 .flags = IORESOURCE_MEM, 185 .flags = IORESOURCE_MEM,
186 }, 186 },
187 [1] = { 187 [1] = {
188 .start = evt2irq(0x0b00), /* CMT1_CMT10 */ 188 .start = evt2irq(0x0b80), /* CMT2 */
189 .flags = IORESOURCE_IRQ, 189 .flags = IORESOURCE_IRQ,
190 }, 190 },
191}; 191};
192 192
193static struct platform_device cmt10_device = { 193static struct platform_device cmt2_device = {
194 .name = "sh_cmt", 194 .name = "sh_cmt",
195 .id = 10, 195 .id = 2,
196 .dev = { 196 .dev = {
197 .platform_data = &cmt10_platform_data, 197 .platform_data = &cmt2_platform_data,
198 }, 198 },
199 .resource = cmt10_resources, 199 .resource = cmt2_resources,
200 .num_resources = ARRAY_SIZE(cmt10_resources), 200 .num_resources = ARRAY_SIZE(cmt2_resources),
201}; 201};
202 202
203/* TMU */ 203/* TMU */
@@ -602,6 +602,150 @@ static struct platform_device dma2_device = {
602 }, 602 },
603}; 603};
604 604
605/*
606 * USB-DMAC
607 */
608
609unsigned int usbts_shift[] = {3, 4, 5};
610
611enum {
612 XMIT_SZ_8BYTE = 0,
613 XMIT_SZ_16BYTE = 1,
614 XMIT_SZ_32BYTE = 2,
615};
616
617#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)
618
619static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = {
620 {
621 .offset = 0,
622 }, {
623 .offset = 0x20,
624 },
625};
626
627/* USB DMAC0 */
628static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = {
629 {
630 .slave_id = SHDMA_SLAVE_USB0_TX,
631 .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
632 }, {
633 .slave_id = SHDMA_SLAVE_USB0_RX,
634 .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
635 },
636};
637
638static struct sh_dmae_pdata usb_dma0_platform_data = {
639 .slave = sh7372_usb_dmae0_slaves,
640 .slave_num = ARRAY_SIZE(sh7372_usb_dmae0_slaves),
641 .channel = sh7372_usb_dmae_channels,
642 .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
643 .ts_low_shift = 6,
644 .ts_low_mask = 0xc0,
645 .ts_high_shift = 0,
646 .ts_high_mask = 0,
647 .ts_shift = usbts_shift,
648 .ts_shift_num = ARRAY_SIZE(usbts_shift),
649 .dmaor_init = DMAOR_DME,
650 .chcr_offset = 0x14,
651 .chcr_ie_bit = 1 << 5,
652 .dmaor_is_32bit = 1,
653 .needs_tend_set = 1,
654 .no_dmars = 1,
655};
656
657static struct resource sh7372_usb_dmae0_resources[] = {
658 {
659 /* Channel registers and DMAOR */
660 .start = 0xe68a0020,
661 .end = 0xe68a0064 - 1,
662 .flags = IORESOURCE_MEM,
663 },
664 {
665 /* VCR/SWR/DMICR */
666 .start = 0xe68a0000,
667 .end = 0xe68a0014 - 1,
668 .flags = IORESOURCE_MEM,
669 },
670 {
671 /* IRQ for channels */
672 .start = evt2irq(0x0a00),
673 .end = evt2irq(0x0a00),
674 .flags = IORESOURCE_IRQ,
675 },
676};
677
678static struct platform_device usb_dma0_device = {
679 .name = "sh-dma-engine",
680 .id = 3,
681 .resource = sh7372_usb_dmae0_resources,
682 .num_resources = ARRAY_SIZE(sh7372_usb_dmae0_resources),
683 .dev = {
684 .platform_data = &usb_dma0_platform_data,
685 },
686};
687
688/* USB DMAC1 */
689static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = {
690 {
691 .slave_id = SHDMA_SLAVE_USB1_TX,
692 .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
693 }, {
694 .slave_id = SHDMA_SLAVE_USB1_RX,
695 .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
696 },
697};
698
699static struct sh_dmae_pdata usb_dma1_platform_data = {
700 .slave = sh7372_usb_dmae1_slaves,
701 .slave_num = ARRAY_SIZE(sh7372_usb_dmae1_slaves),
702 .channel = sh7372_usb_dmae_channels,
703 .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
704 .ts_low_shift = 6,
705 .ts_low_mask = 0xc0,
706 .ts_high_shift = 0,
707 .ts_high_mask = 0,
708 .ts_shift = usbts_shift,
709 .ts_shift_num = ARRAY_SIZE(usbts_shift),
710 .dmaor_init = DMAOR_DME,
711 .chcr_offset = 0x14,
712 .chcr_ie_bit = 1 << 5,
713 .dmaor_is_32bit = 1,
714 .needs_tend_set = 1,
715 .no_dmars = 1,
716};
717
718static struct resource sh7372_usb_dmae1_resources[] = {
719 {
720 /* Channel registers and DMAOR */
721 .start = 0xe68c0020,
722 .end = 0xe68c0064 - 1,
723 .flags = IORESOURCE_MEM,
724 },
725 {
726 /* VCR/SWR/DMICR */
727 .start = 0xe68c0000,
728 .end = 0xe68c0014 - 1,
729 .flags = IORESOURCE_MEM,
730 },
731 {
732 /* IRQ for channels */
733 .start = evt2irq(0x1d00),
734 .end = evt2irq(0x1d00),
735 .flags = IORESOURCE_IRQ,
736 },
737};
738
739static struct platform_device usb_dma1_device = {
740 .name = "sh-dma-engine",
741 .id = 4,
742 .resource = sh7372_usb_dmae1_resources,
743 .num_resources = ARRAY_SIZE(sh7372_usb_dmae1_resources),
744 .dev = {
745 .platform_data = &usb_dma1_platform_data,
746 },
747};
748
605/* VPU */ 749/* VPU */
606static struct uio_info vpu_platform_data = { 750static struct uio_info vpu_platform_data = {
607 .name = "VPU5HG", 751 .name = "VPU5HG",
@@ -818,7 +962,7 @@ static struct platform_device *sh7372_early_devices[] __initdata = {
818 &scif4_device, 962 &scif4_device,
819 &scif5_device, 963 &scif5_device,
820 &scif6_device, 964 &scif6_device,
821 &cmt10_device, 965 &cmt2_device,
822 &tmu00_device, 966 &tmu00_device,
823 &tmu01_device, 967 &tmu01_device,
824}; 968};
@@ -829,6 +973,8 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
829 &dma0_device, 973 &dma0_device,
830 &dma1_device, 974 &dma1_device,
831 &dma2_device, 975 &dma2_device,
976 &usb_dma0_device,
977 &usb_dma1_device,
832 &vpu_device, 978 &vpu_device,
833 &veu0_device, 979 &veu0_device,
834 &veu1_device, 980 &veu1_device,
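The USB-DMAC platform data describes a nonstandard CHCR layout to the sh-dma-engine driver: the transfer-size index occupies bits [7:6] (ts_low_shift = 6, ts_low_mask = 0xc0), and usbts_shift[] maps index i to transfers of 1 << usbts_shift[i] bytes. Working the encoding through by hand:

/* USBTS_INDEX2VAL(XMIT_SZ_8BYTE)  == (0 & 3) << 6 == 0x00 -> 1 << 3 = 8 bytes  */
/* USBTS_INDEX2VAL(XMIT_SZ_32BYTE) == (2 & 3) << 6 == 0x80 -> 1 << 5 = 32 bytes */
unsigned int chcr  = USBTS_INDEX2VAL(XMIT_SZ_8BYTE);
unsigned int bytes = 1 << usbts_shift[(chcr & 0xc0) >> 6];	/* = 8 */

Both controllers also set needs_tend_set and no_dmars, matching a DMAC variant without DMARS routing registers.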
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index de72058180a6..1fafc3244607 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -318,6 +318,10 @@ static struct clk v2m_sp804_clk = {
318 .rate = 1000000, 318 .rate = 1000000,
319}; 319};
320 320
321static struct clk v2m_ref_clk = {
322 .rate = 32768,
323};
324
321static struct clk dummy_apb_pclk; 325static struct clk dummy_apb_pclk;
322 326
323static struct clk_lookup v2m_lookups[] = { 327static struct clk_lookup v2m_lookups[] = {
@@ -348,6 +352,9 @@ static struct clk_lookup v2m_lookups[] = {
348 }, { /* CLCD */ 352 }, { /* CLCD */
349 .dev_id = "mb:clcd", 353 .dev_id = "mb:clcd",
350 .clk = &osc1_clk, 354 .clk = &osc1_clk,
355 }, { /* SP805 WDT */
356 .dev_id = "mb:wdt",
357 .clk = &v2m_ref_clk,
351 }, { /* SP804 timers */ 358 }, { /* SP804 timers */
352 .dev_id = "sp804", 359 .dev_id = "sp804",
353 .con_id = "v2m-timer0", 360 .con_id = "v2m-timer0",
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 92bd102e3982..2e6849b41f66 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -379,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext)
379 379
380/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 380/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
381.globl cpu_arm920_suspend_size 381.globl cpu_arm920_suspend_size
382.equ cpu_arm920_suspend_size, 4 * 3 382.equ cpu_arm920_suspend_size, 4 * 4
383#ifdef CONFIG_PM_SLEEP 383#ifdef CONFIG_PM_SLEEP
384ENTRY(cpu_arm920_do_suspend) 384ENTRY(cpu_arm920_do_suspend)
385 stmfd sp!, {r4 - r7, lr} 385 stmfd sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 2bbcf053dffd..cd8f79c3a282 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -394,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext)
394 394
395/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 395/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
396.globl cpu_arm926_suspend_size 396.globl cpu_arm926_suspend_size
397.equ cpu_arm926_suspend_size, 4 * 3 397.equ cpu_arm926_suspend_size, 4 * 4
398#ifdef CONFIG_PM_SLEEP 398#ifdef CONFIG_PM_SLEEP
399ENTRY(cpu_arm926_do_suspend) 399ENTRY(cpu_arm926_do_suspend)
400 stmfd sp!, {r4 - r7, lr} 400 stmfd sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 07219c2ae114..69e7f2ef7384 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -182,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend)
182 182
183ENTRY(cpu_sa1100_do_resume) 183ENTRY(cpu_sa1100_do_resume)
184 ldmia r0, {r4 - r7} @ load cp regs 184 ldmia r0, {r4 - r7} @ load cp regs
185 mov r1, #0 185 mov ip, #0
186 mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs 186 mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs
187 mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache 187 mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache
188 mcr p15, 0, r1, c9, c0, 0 @ invalidate RB 188 mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
189 mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB 189 mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB
190 190
191 mcr p15, 0, r4, c3, c0, 0 @ domain ID 191 mcr p15, 0, r4, c3, c0, 0 @ domain ID
192 mcr p15, 0, r5, c2, c0, 0 @ translation table base addr 192 mcr p15, 0, r5, c2, c0, 0 @ translation table base addr
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 219138d2f158..a923aa0fd00d 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -223,6 +223,22 @@ __v6_setup:
223 mrc p15, 0, r0, c1, c0, 0 @ read control register 223 mrc p15, 0, r0, c1, c0, 0 @ read control register
 224 bic r0, r0, r5 @ clear the bits 224 bic r0, r0, r5 @ clear the bits
225 orr r0, r0, r6 @ set them 225 orr r0, r0, r6 @ set them
226#ifdef CONFIG_ARM_ERRATA_364296
227 /*
228 * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data
229 * corruption with hit-under-miss enabled). The conditional code below
230 * (setting the undocumented bit 31 in the auxiliary control register
231 * and the FI bit in the control register) disables hit-under-miss
232 * without putting the processor into full low interrupt latency mode.
233 */
234 ldr r6, =0x4107b362 @ id for ARM1136 r0p2
235 mrc p15, 0, r5, c0, c0, 0 @ get processor id
236 teq r5, r6 @ check for the faulty core
237 mrceq p15, 0, r5, c1, c0, 1 @ load aux control reg
238 orreq r5, r5, #(1 << 31) @ set the undocumented bit 31
239 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg
240 orreq r0, r0, #(1 << 21) @ low interrupt latency configuration
241#endif
226 mov pc, lr @ return to head.S:__ret 242 mov pc, lr @ return to head.S:__ret
227 243
228 /* 244 /*
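The erratum workaround keys off an exact main ID register match, 0x4107b362, which decodes to the one core the comment names. Broken into the standard MIDR fields (a sketch; the macro names are illustrative):

#define MIDR_IMPLEMENTER(m)	(((m) >> 24) & 0xff)	/* 0x41 = ARM Ltd */
#define MIDR_VARIANT(m)		(((m) >> 20) & 0xf)	/* 0x0 -> r0 */
#define MIDR_ARCHITECTURE(m)	(((m) >> 16) & 0xf)	/* 0x7 = CPUID scheme */
#define MIDR_PART(m)		(((m) >> 4) & 0xfff)	/* 0xb36 = ARM1136 */
#define MIDR_REVISION(m)	((m) & 0xf)		/* 0x2 -> p2 */
/* 0x4107b362: ARM Ltd ARM1136 r0p2, the faulty core named in the comment */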
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a30e78542ccf..9049c0764db2 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -66,6 +66,7 @@ ENDPROC(cpu_v7_proc_fin)
66ENTRY(cpu_v7_reset) 66ENTRY(cpu_v7_reset)
67 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 67 mrc p15, 0, r1, c1, c0, 0 @ ctrl register
68 bic r1, r1, #0x1 @ ...............m 68 bic r1, r1, #0x1 @ ...............m
69 THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
69 mcr p15, 0, r1, c1, c0, 0 @ disable MMU 70 mcr p15, 0, r1, c1, c0, 0 @ disable MMU
70 isb 71 isb
71 mov pc, r0 72 mov pc, r0
@@ -247,13 +248,16 @@ ENTRY(cpu_v7_do_resume)
247 mcr p15, 0, r7, c2, c0, 0 @ TTB 0 248 mcr p15, 0, r7, c2, c0, 0 @ TTB 0
248 mcr p15, 0, r8, c2, c0, 1 @ TTB 1 249 mcr p15, 0, r8, c2, c0, 1 @ TTB 1
249 mcr p15, 0, ip, c2, c0, 2 @ TTB control register 250 mcr p15, 0, ip, c2, c0, 2 @ TTB control register
250 mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register 251 mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
252 teq r4, r10 @ Is it already set?
253 mcrne p15, 0, r10, c1, c0, 1 @ No, so write it
251 mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control 254 mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control
252 ldr r4, =PRRR @ PRRR 255 ldr r4, =PRRR @ PRRR
253 ldr r5, =NMRR @ NMRR 256 ldr r5, =NMRR @ NMRR
254 mcr p15, 0, r4, c10, c2, 0 @ write PRRR 257 mcr p15, 0, r4, c10, c2, 0 @ write PRRR
255 mcr p15, 0, r5, c10, c2, 1 @ write NMRR 258 mcr p15, 0, r5, c10, c2, 1 @ write NMRR
256 isb 259 isb
260 dsb
257 mov r0, r9 @ control register 261 mov r0, r9 @ control register
258 mov r2, r7, lsr #14 @ get TTB0 base 262 mov r2, r7, lsr #14 @ get TTB0 base
259 mov r2, r2, lsl #14 263 mov r2, r2, lsl #14
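cpu_v7_do_resume now reads the Auxiliary Control Register back and only writes it when the saved value differs; skipping the redundant write avoids touching a register that may not be writable from non-secure state (the rationale is an assumption; the diff only shows the compare-and-skip). In C terms:

if (read_actlr() != saved_actlr)	/* hypothetical accessors */
	write_actlr(saved_actlr);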
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 28c72a2006a1..755e1bf22681 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
406 .align 406 .align
407 407
408.globl cpu_xsc3_suspend_size 408.globl cpu_xsc3_suspend_size
409.equ cpu_xsc3_suspend_size, 4 * 8 409.equ cpu_xsc3_suspend_size, 4 * 7
410#ifdef CONFIG_PM_SLEEP 410#ifdef CONFIG_PM_SLEEP
411ENTRY(cpu_xsc3_do_suspend) 411ENTRY(cpu_xsc3_do_suspend)
412 stmfd sp!, {r4 - r10, lr} 412 stmfd sp!, {r4 - r10, lr}
@@ -418,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend)
418 mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg 418 mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg
419 mrc p15, 0, r10, c1, c0, 0 @ control reg 419 mrc p15, 0, r10, c1, c0, 0 @ control reg
420 bic r4, r4, #2 @ clear frequency change bit 420 bic r4, r4, #2 @ clear frequency change bit
421 stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs 421 stmia r0, {r4 - r10} @ store cp regs
422 ldmia sp!, {r4 - r10, pc} 422 ldmia sp!, {r4 - r10, pc}
423ENDPROC(cpu_xsc3_do_suspend) 423ENDPROC(cpu_xsc3_do_suspend)
424 424
425ENTRY(cpu_xsc3_do_resume) 425ENTRY(cpu_xsc3_do_resume)
426 ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs 426 ldmia r0, {r4 - r10} @ load cp regs
427 mov ip, #0 427 mov ip, #0
428 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB 428 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
429 mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer 429 mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index 02af235298e2..5f84a3f13ef9 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -192,7 +192,7 @@ unsigned long s5p_spdif_get_rate(struct clk *clk)
192 if (IS_ERR(pclk)) 192 if (IS_ERR(pclk))
193 return -EINVAL; 193 return -EINVAL;
194 194
195 rate = pclk->ops->get_rate(clk); 195 rate = pclk->ops->get_rate(pclk);
196 clk_put(pclk); 196 clk_put(pclk);
197 197
198 return rate; 198 return rate;
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 327ab9f662e8..f71078ef6bb5 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -23,6 +23,8 @@
23#include <plat/gpio-core.h> 23#include <plat/gpio-core.h>
24#include <plat/gpio-cfg.h> 24#include <plat/gpio-cfg.h>
25 25
26#include <asm/mach/irq.h>
27
26#define GPIO_BASE(chip) (((unsigned long)(chip)->base) & 0xFFFFF000u) 28#define GPIO_BASE(chip) (((unsigned long)(chip)->base) & 0xFFFFF000u)
27 29
28#define CON_OFFSET 0x700 30#define CON_OFFSET 0x700
@@ -81,6 +83,9 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
81 int group, pend_offset, mask_offset; 83 int group, pend_offset, mask_offset;
82 unsigned int pend, mask; 84 unsigned int pend, mask;
83 85
86 struct irq_chip *chip = irq_get_chip(irq);
87 chained_irq_enter(chip, desc);
88
84 for (group = 0; group < bank->nr_groups; group++) { 89 for (group = 0; group < bank->nr_groups; group++) {
85 struct s3c_gpio_chip *chip = bank->chips[group]; 90 struct s3c_gpio_chip *chip = bank->chips[group];
86 if (!chip) 91 if (!chip)
@@ -102,6 +107,7 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
102 pend &= ~BIT(offset); 107 pend &= ~BIT(offset);
103 } 108 }
104 } 109 }
110 chained_irq_exit(chip, desc);
105} 111}
106 112
107static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) 113static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
diff --git a/arch/arm/plat-samsung/include/plat/backlight.h b/arch/arm/plat-samsung/include/plat/backlight.h
index 51d8da846a62..ad530c78fe8c 100644
--- a/arch/arm/plat-samsung/include/plat/backlight.h
+++ b/arch/arm/plat-samsung/include/plat/backlight.h
@@ -20,7 +20,7 @@ struct samsung_bl_gpio_info {
20 int func; 20 int func;
21}; 21};
22 22
23extern void samsung_bl_set(struct samsung_bl_gpio_info *gpio_info, 23extern void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
24 struct platform_pwm_backlight_data *bl_data); 24 struct platform_pwm_backlight_data *bl_data);
25 25
26#endif /* __ASM_PLAT_BACKLIGHT_H */ 26#endif /* __ASM_PLAT_BACKLIGHT_H */
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index f714d060370d..51583cd30164 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -22,9 +22,14 @@
22#include <plat/irq-vic-timer.h> 22#include <plat/irq-vic-timer.h>
23#include <plat/regs-timer.h> 23#include <plat/regs-timer.h>
24 24
25#include <asm/mach/irq.h>
26
25static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc) 27static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
26{ 28{
29 struct irq_chip *chip = irq_get_chip(irq);
30 chained_irq_enter(chip, desc);
27 generic_handle_irq((int)desc->irq_data.handler_data); 31 generic_handle_irq((int)desc->irq_data.handler_data);
32 chained_irq_exit(chip, desc);
28} 33}
29 34
30/* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */ 35/* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index fff68d0d521b..62cc8f981171 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -351,7 +351,7 @@ centro MACH_CENTRO CENTRO 1944
351nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955 351nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955
352omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967 352omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967
353cpuat9260 MACH_CPUAT9260 CPUAT9260 1973 353cpuat9260 MACH_CPUAT9260 CPUAT9260 1973
354eukrea_cpuimx27 MACH_CPUIMX27 CPUIMX27 1975 354eukrea_cpuimx27 MACH_EUKREA_CPUIMX27 EUKREA_CPUIMX27 1975
355acs5k MACH_ACS5K ACS5K 1982 355acs5k MACH_ACS5K ACS5K 1982
356snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987 356snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987
357dsm320 MACH_DSM320 DSM320 1988 357dsm320 MACH_DSM320 DSM320 1988
@@ -476,8 +476,8 @@ cns3420vb MACH_CNS3420VB CNS3420VB 2776
 omap4_panda		MACH_OMAP4_PANDA	OMAP4_PANDA		2791
 ti8168evm		MACH_TI8168EVM		TI8168EVM		2800
 teton_bga		MACH_TETON_BGA		TETON_BGA		2816
-eukrea_cpuimx25sd	MACH_EUKREA_CPUIMX25	EUKREA_CPUIMX25		2820
-eukrea_cpuimx35sd	MACH_EUKREA_CPUIMX35	EUKREA_CPUIMX35		2821
+eukrea_cpuimx25sd	MACH_EUKREA_CPUIMX25SD	EUKREA_CPUIMX25SD	2820
+eukrea_cpuimx35sd	MACH_EUKREA_CPUIMX35SD	EUKREA_CPUIMX35SD	2821
 eukrea_cpuimx51sd	MACH_EUKREA_CPUIMX51SD	EUKREA_CPUIMX51SD	2822
 eukrea_cpuimx51		MACH_EUKREA_CPUIMX51	EUKREA_CPUIMX51		2823
 smdkc210		MACH_SMDKC210		SMDKC210		2838
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index e66366fd2abc..3735abd7f8f6 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -259,7 +259,7 @@
 	ENTRY_SAME(ni_syscall)		/* query_module */
 	ENTRY_SAME(poll)
 	/* structs contain pointers and an in_addr... */
-	ENTRY_COMP(nfsservctl)
+	ENTRY_SAME(ni_syscall)		/* was nfsservctl */
 	ENTRY_SAME(setresgid)		/* 170 */
 	ENTRY_SAME(getresgid)
 	ENTRY_SAME(prctl)
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index f6736b7da463..fa0d27a400de 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -171,7 +171,7 @@ SYSCALL_SPU(setresuid)
 SYSCALL_SPU(getresuid)
 SYSCALL(ni_syscall)
 SYSCALL_SPU(poll)
-COMPAT_SYS(nfsservctl)
+SYSCALL(ni_syscall)
 SYSCALL_SPU(setresgid)
 SYSCALL_SPU(getresgid)
 COMPAT_SYS_SPU(prctl)
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index b97baf81a87b..2d3679b2447f 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 struct perf_event;
 struct perf_sample_data;
 
-extern void ptrace_triggered(struct perf_event *bp, int nmi,
+extern void ptrace_triggered(struct perf_event *bp,
 			     struct perf_sample_data *data, struct pt_regs *regs);
 
 #define task_pt_regs(task) \
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index e915deafac89..05559295d2ca 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -15,6 +15,7 @@
 #include <linux/serial_sci.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/dma-mapping.h>
 #include <linux/sh_timer.h>
 #include <linux/sh_dma.h>
 
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 32114e0941ae..db4ecd731a00 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
 #include <linux/atomic.h>
 #include <asm/smp.h>
 
-static void (*pm_idle)(void);
+void (*pm_idle)(void);
 
 static int hlt_counter;
 
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index d9006f8ffc14..7bbef95c9d1b 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -316,6 +316,35 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
 			break;
 		}
 		break;
+
+	case 9: /* mov.w @(disp,PC),Rn */
+		srcu = (unsigned char __user *)regs->pc;
+		srcu += 4;
+		srcu += (instruction & 0x00FF) << 1;
+		dst = (unsigned char *)rn;
+		*(unsigned long *)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+		dst += 2;
+#endif
+
+		if (ma->from(dst, srcu, 2))
+			goto fetch_fault;
+		sign_extend(2, dst);
+		ret = 0;
+		break;
+
+	case 0xd: /* mov.l @(disp,PC),Rn */
+		srcu = (unsigned char __user *)(regs->pc & ~0x3);
+		srcu += 4;
+		srcu += (instruction & 0x00FF) << 2;
+		dst = (unsigned char *)rn;
+		*(unsigned long *)dst = 0;
+
+		if (ma->from(dst, srcu, 4))
+			goto fetch_fault;
+		ret = 0;
+		break;
 	}
 	return ret;
 
@@ -466,6 +495,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 		case 0x0500: /* mov.w @(disp,Rm),R0 */
 			goto simple;
 		case 0x0B00: /* bf   lab - no delayslot*/
+			ret = 0;
 			break;
 		case 0x0F00: /* bf/s lab */
 			ret = handle_delayslot(regs, instruction, ma);
@@ -479,6 +509,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 			}
 			break;
 		case 0x0900: /* bt   lab - no delayslot */
+			ret = 0;
 			break;
 		case 0x0D00: /* bt/s lab */
 			ret = handle_delayslot(regs, instruction, ma);
@@ -494,6 +525,9 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 		}
 		break;
 
+	case 0x9000: /* mov.w @(disp,Rm),Rn */
+		goto simple;
+
 	case 0xA000: /* bra label */
 		ret = handle_delayslot(regs, instruction, ma);
 		if (ret==0)
@@ -507,6 +541,9 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 			regs->pc += SH_PC_12BIT_OFFSET(instruction);
 		}
 		break;
+
+	case 0xD000: /* mov.l @(disp,Rm),Rn */
+		goto simple;
 	}
 	return ret;
 
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index 100b9c204e78..42851122bbd9 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -88,7 +88,7 @@ BTFIXUPDEF_CALL(void, set_irq_udt, int)
 #define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
 
 /* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
-#define SUN4D_IPI_IRQ 14
+#define SUN4D_IPI_IRQ 13
 
 extern void sun4d_ipi_interrupt(void);
 
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index c9296ab0b1f4..edbec45d4688 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -68,7 +68,7 @@ sys_call_table32:
 	.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
 /*240*/	.word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
 	.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
-/*250*/	.word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+/*250*/	.word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys_nis_syscall
 	.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
 /*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
 	.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy