Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile            |   3
-rw-r--r--  arch/arm/kernel/bios32.c            |  75
-rw-r--r--  arch/arm/kernel/crunch-bits.S       | 305
-rw-r--r--  arch/arm/kernel/crunch.c            |  88
-rw-r--r--  arch/arm/kernel/entry-armv.S        |   3
-rw-r--r--  arch/arm/kernel/entry-common.S      |   8
-rw-r--r--  arch/arm/kernel/perf_event.c        |  49
-rw-r--r--  arch/arm/kernel/perf_event_v6.c     |  22
-rw-r--r--  arch/arm/kernel/perf_event_v7.c     |  11
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c |  20
-rw-r--r--  arch/arm/kernel/process.c           |  34
-rw-r--r--  arch/arm/kernel/ptrace.c            |   9
-rw-r--r--  arch/arm/kernel/smp.c               |  29
-rw-r--r--  arch/arm/kernel/smp_twd.c           | 125
-rw-r--r--  arch/arm/kernel/traps.c             |   5
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S       |   1
16 files changed, 234 insertions(+), 553 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 17663c63f7a1..3a274878412e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -61,9 +61,6 @@ obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
 CFLAGS_swp_emulate.o		:= -Wa,-march=armv7-a
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 
-obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
-AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
-
 obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index f58ba3589908..632df9a66f8c 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -16,7 +16,6 @@
 #include <asm/mach/pci.h>
 
 static int debug_pci;
-static int use_firmware;
 
 /*
  * We can't use pci_find_device() here since we are
@@ -295,28 +294,6 @@ static inline int pdev_bad_for_parity(struct pci_dev *dev)
 }
 
 /*
- * Adjust the device resources from bus-centric to Linux-centric.
- */
-static void __devinit
-pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev)
-{
-	resource_size_t offset;
-	int i;
-
-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		if (dev->resource[i].start == 0)
-			continue;
-		if (dev->resource[i].flags & IORESOURCE_MEM)
-			offset = root->mem_offset;
-		else
-			offset = root->io_offset;
-
-		dev->resource[i].start += offset;
-		dev->resource[i].end += offset;
-	}
-}
-
-/*
  * pcibios_fixup_bus - Called after each bus is probed,
  * but before its children are examined.
  */
@@ -333,8 +310,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		u16 status;
 
-		pdev_fixup_device_resources(root, dev);
-
 		pci_read_config_word(dev, PCI_STATUS, &status);
 
 		/*
@@ -400,43 +375,6 @@ EXPORT_SYMBOL(pcibios_fixup_bus);
 #endif
 
 /*
- * Convert from Linux-centric to bus-centric addresses for bridge devices.
- */
-void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
-			struct resource *res)
-{
-	struct pci_sys_data *root = dev->sysdata;
-	unsigned long offset = 0;
-
-	if (res->flags & IORESOURCE_IO)
-		offset = root->io_offset;
-	if (res->flags & IORESOURCE_MEM)
-		offset = root->mem_offset;
-
-	region->start = res->start - offset;
-	region->end = res->end - offset;
-}
-EXPORT_SYMBOL(pcibios_resource_to_bus);
-
-void __devinit
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
-			struct pci_bus_region *region)
-{
-	struct pci_sys_data *root = dev->sysdata;
-	unsigned long offset = 0;
-
-	if (res->flags & IORESOURCE_IO)
-		offset = root->io_offset;
-	if (res->flags & IORESOURCE_MEM)
-		offset = root->mem_offset;
-
-	res->start = region->start + offset;
-	res->end = region->end + offset;
-}
-EXPORT_SYMBOL(pcibios_bus_to_resource);
-
-/*
  * Swizzle the device pin each time we cross a bridge.
  * This might update pin and returns the slot number.
  */
@@ -497,10 +435,10 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
 
 	if (ret > 0) {
 		if (list_empty(&sys->resources)) {
-			pci_add_resource(&sys->resources,
-					 &ioport_resource);
-			pci_add_resource(&sys->resources,
-					 &iomem_resource);
+			pci_add_resource_offset(&sys->resources,
+					 &ioport_resource, sys->io_offset);
+			pci_add_resource_offset(&sys->resources,
+					 &iomem_resource, sys->mem_offset);
 		}
 
 		sys->bus = hw->scan(nr, sys);
@@ -525,6 +463,7 @@ void __init pci_common_init(struct hw_pci *hw)
 
 	INIT_LIST_HEAD(&hw->buses);
 
+	pci_add_flags(PCI_REASSIGN_ALL_RSRC);
 	if (hw->preinit)
 		hw->preinit();
 	pcibios_init_hw(hw);
@@ -536,7 +475,7 @@ void __init pci_common_init(struct hw_pci *hw)
 	list_for_each_entry(sys, &hw->buses, node) {
 		struct pci_bus *bus = sys->bus;
 
-		if (!use_firmware) {
+		if (!pci_has_flag(PCI_PROBE_ONLY)) {
 			/*
 			 * Size the bridge windows.
 			 */
@@ -573,7 +512,7 @@ char * __init pcibios_setup(char *str)
 		debug_pci = 1;
 		return NULL;
 	} else if (!strcmp(str, "firmware")) {
-		use_firmware = 1;
+		pci_add_flags(PCI_PROBE_ONLY);
 		return NULL;
 	}
 	return str;
diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S
deleted file mode 100644
index 0ec9bb48fab9..000000000000
--- a/arch/arm/kernel/crunch-bits.S
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * arch/arm/kernel/crunch-bits.S
- * Cirrus MaverickCrunch context switching and handling
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
- * Copyright (c) 2003-2004, MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <mach/ep93xx-regs.h>
-
-/*
- * We can't use hex constants here due to a bug in gas.
- */
-#define CRUNCH_MVDX0		0
-#define CRUNCH_MVDX1		8
-#define CRUNCH_MVDX2		16
-#define CRUNCH_MVDX3		24
-#define CRUNCH_MVDX4		32
-#define CRUNCH_MVDX5		40
-#define CRUNCH_MVDX6		48
-#define CRUNCH_MVDX7		56
-#define CRUNCH_MVDX8		64
-#define CRUNCH_MVDX9		72
-#define CRUNCH_MVDX10		80
-#define CRUNCH_MVDX11		88
-#define CRUNCH_MVDX12		96
-#define CRUNCH_MVDX13		104
-#define CRUNCH_MVDX14		112
-#define CRUNCH_MVDX15		120
-#define CRUNCH_MVAX0L		128
-#define CRUNCH_MVAX0M		132
-#define CRUNCH_MVAX0H		136
-#define CRUNCH_MVAX1L		140
-#define CRUNCH_MVAX1M		144
-#define CRUNCH_MVAX1H		148
-#define CRUNCH_MVAX2L		152
-#define CRUNCH_MVAX2M		156
-#define CRUNCH_MVAX2H		160
-#define CRUNCH_MVAX3L		164
-#define CRUNCH_MVAX3M		168
-#define CRUNCH_MVAX3H		172
-#define CRUNCH_DSPSC		176
-
-#define CRUNCH_SIZE		184
-
-	.text
-
-/*
- * Lazy switching of crunch coprocessor context
- *
- * r10 = struct thread_info pointer
- * r9  = ret_from_exception
- * lr  = undefined instr exit
- *
- * called from prefetch exception handler with interrupts disabled
- */
-ENTRY(crunch_task_enable)
-	ldr	r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
-
-	ldr	r1, [r8, #0x80]
-	tst	r1, #0x00800000			@ access to crunch enabled?
-	movne	pc, lr				@ if so no business here
-	mov	r3, #0xaa			@ unlock syscon swlock
-	str	r3, [r8, #0xc0]
-	orr	r1, r1, #0x00800000		@ enable access to crunch
-	str	r1, [r8, #0x80]
-
-	ldr	r3, =crunch_owner
-	add	r0, r10, #TI_CRUNCH_STATE	@ get task crunch save area
-	ldr	r2, [sp, #60]			@ current task pc value
-	ldr	r1, [r3]			@ get current crunch owner
-	str	r0, [r3]			@ this task now owns crunch
-	sub	r2, r2, #4			@ adjust pc back
-	str	r2, [sp, #60]
-
-	ldr	r2, [r8, #0x80]
-	mov	r2, r2				@ flush out enable (@@@)
-
-	teq	r1, #0				@ test for last ownership
-	mov	lr, r9				@ normal exit from exception
-	beq	crunch_load			@ no owner, skip save
-
-crunch_save:
-	cfstr64	mvdx0, [r1, #CRUNCH_MVDX0]	@ save 64b registers
-	cfstr64	mvdx1, [r1, #CRUNCH_MVDX1]
-	cfstr64	mvdx2, [r1, #CRUNCH_MVDX2]
-	cfstr64	mvdx3, [r1, #CRUNCH_MVDX3]
-	cfstr64	mvdx4, [r1, #CRUNCH_MVDX4]
-	cfstr64	mvdx5, [r1, #CRUNCH_MVDX5]
-	cfstr64	mvdx6, [r1, #CRUNCH_MVDX6]
-	cfstr64	mvdx7, [r1, #CRUNCH_MVDX7]
-	cfstr64	mvdx8, [r1, #CRUNCH_MVDX8]
-	cfstr64	mvdx9, [r1, #CRUNCH_MVDX9]
-	cfstr64	mvdx10, [r1, #CRUNCH_MVDX10]
-	cfstr64	mvdx11, [r1, #CRUNCH_MVDX11]
-	cfstr64	mvdx12, [r1, #CRUNCH_MVDX12]
-	cfstr64	mvdx13, [r1, #CRUNCH_MVDX13]
-	cfstr64	mvdx14, [r1, #CRUNCH_MVDX14]
-	cfstr64	mvdx15, [r1, #CRUNCH_MVDX15]
-
-#ifdef __ARMEB__
-#error fix me for ARMEB
-#endif
-
-	cfmv32al mvfx0, mvax0			@ save 72b accumulators
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0L]
-	cfmv32am mvfx0, mvax0
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0M]
-	cfmv32ah mvfx0, mvax0
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0H]
-	cfmv32al mvfx0, mvax1
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1L]
-	cfmv32am mvfx0, mvax1
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1M]
-	cfmv32ah mvfx0, mvax1
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1H]
-	cfmv32al mvfx0, mvax2
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2L]
-	cfmv32am mvfx0, mvax2
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2M]
-	cfmv32ah mvfx0, mvax2
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2H]
-	cfmv32al mvfx0, mvax3
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3L]
-	cfmv32am mvfx0, mvax3
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3M]
-	cfmv32ah mvfx0, mvax3
-	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3H]
-
-	cfmv32sc mvdx0, dspsc			@ save status word
-	cfstr64	mvdx0, [r1, #CRUNCH_DSPSC]
-
-	teq	r0, #0				@ anything to load?
-	cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0]	@ mvdx0 was clobbered
-	moveq	pc, lr
-
-crunch_load:
-	cfldr64	mvdx0, [r0, #CRUNCH_DSPSC]	@ load status word
-	cfmvsc32 dspsc, mvdx0
-
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0L]	@ load 72b accumulators
-	cfmval32 mvax0, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0M]
-	cfmvam32 mvax0, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0H]
-	cfmvah32 mvax0, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1L]
-	cfmval32 mvax1, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1M]
-	cfmvam32 mvax1, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1H]
-	cfmvah32 mvax1, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2L]
-	cfmval32 mvax2, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2M]
-	cfmvam32 mvax2, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2H]
-	cfmvah32 mvax2, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3L]
-	cfmval32 mvax3, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3M]
-	cfmvam32 mvax3, mvfx0
-	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3H]
-	cfmvah32 mvax3, mvfx0
-
-	cfldr64	mvdx0, [r0, #CRUNCH_MVDX0]	@ load 64b registers
-	cfldr64	mvdx1, [r0, #CRUNCH_MVDX1]
-	cfldr64	mvdx2, [r0, #CRUNCH_MVDX2]
-	cfldr64	mvdx3, [r0, #CRUNCH_MVDX3]
-	cfldr64	mvdx4, [r0, #CRUNCH_MVDX4]
-	cfldr64	mvdx5, [r0, #CRUNCH_MVDX5]
-	cfldr64	mvdx6, [r0, #CRUNCH_MVDX6]
-	cfldr64	mvdx7, [r0, #CRUNCH_MVDX7]
-	cfldr64	mvdx8, [r0, #CRUNCH_MVDX8]
-	cfldr64	mvdx9, [r0, #CRUNCH_MVDX9]
-	cfldr64	mvdx10, [r0, #CRUNCH_MVDX10]
-	cfldr64	mvdx11, [r0, #CRUNCH_MVDX11]
-	cfldr64	mvdx12, [r0, #CRUNCH_MVDX12]
-	cfldr64	mvdx13, [r0, #CRUNCH_MVDX13]
-	cfldr64	mvdx14, [r0, #CRUNCH_MVDX14]
-	cfldr64	mvdx15, [r0, #CRUNCH_MVDX15]
-
-	mov	pc, lr
-
-/*
- * Back up crunch regs to save area and disable access to them
- * (mainly for gdb or sleep mode usage)
- *
- * r0 = struct thread_info pointer of target task or NULL for any
- */
-ENTRY(crunch_task_disable)
-	stmfd	sp!, {r4, r5, lr}
-
-	mrs	ip, cpsr
-	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
-	msr	cpsr_c, r2
-
-	ldr	r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
-
-	ldr	r3, =crunch_owner
-	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
-	ldr	r1, [r3]			@ get current crunch owner
-	teq	r1, #0				@ any current owner?
-	beq	1f				@ no: quit
-	teq	r0, #0				@ any owner?
-	teqne	r1, r2				@ or specified one?
-	bne	1f				@ no: quit
-
-	ldr	r5, [r4, #0x80]			@ enable access to crunch
-	mov	r2, #0xaa
-	str	r2, [r4, #0xc0]
-	orr	r5, r5, #0x00800000
-	str	r5, [r4, #0x80]
-
-	mov	r0, #0				@ nothing to load
-	str	r0, [r3]			@ no more current owner
-	ldr	r2, [r4, #0x80]			@ flush out enable (@@@)
-	mov	r2, r2
-	bl	crunch_save
-
-	mov	r2, #0xaa			@ disable access to crunch
-	str	r2, [r4, #0xc0]
-	bic	r5, r5, #0x00800000
-	str	r5, [r4, #0x80]
-	ldr	r5, [r4, #0x80]			@ flush out enable (@@@)
-	mov	r5, r5
-
-1:	msr	cpsr_c, ip			@ restore interrupt mode
-	ldmfd	sp!, {r4, r5, pc}
-
-/*
- * Copy crunch state to given memory address
- *
- * r0 = struct thread_info pointer of target task
- * r1 = memory address where to store crunch state
- *
- * this is called mainly in the creation of signal stack frames
- */
-ENTRY(crunch_task_copy)
-	mrs	ip, cpsr
-	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
-	msr	cpsr_c, r2
-
-	ldr	r3, =crunch_owner
-	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
-	ldr	r3, [r3]			@ get current crunch owner
-	teq	r2, r3				@ does this task own it...
-	beq	1f
-
-	@ current crunch values are in the task save area
-	msr	cpsr_c, ip			@ restore interrupt mode
-	mov	r0, r1
-	mov	r1, r2
-	mov	r2, #CRUNCH_SIZE
-	b	memcpy
-
-1:	@ this task owns crunch regs -- grab a copy from there
-	mov	r0, #0				@ nothing to load
-	mov	r3, lr				@ preserve return address
-	bl	crunch_save
-	msr	cpsr_c, ip			@ restore interrupt mode
-	mov	pc, r3
-
-/*
- * Restore crunch state from given memory address
- *
- * r0 = struct thread_info pointer of target task
- * r1 = memory address where to get crunch state from
- *
- * this is used to restore crunch state when unwinding a signal stack frame
- */
-ENTRY(crunch_task_restore)
-	mrs	ip, cpsr
-	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
-	msr	cpsr_c, r2
-
-	ldr	r3, =crunch_owner
-	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
-	ldr	r3, [r3]			@ get current crunch owner
-	teq	r2, r3				@ does this task own it...
-	beq	1f
-
-	@ this task doesn't own crunch regs -- use its save area
-	msr	cpsr_c, ip			@ restore interrupt mode
-	mov	r0, r2
-	mov	r2, #CRUNCH_SIZE
-	b	memcpy
-
-1:	@ this task owns crunch regs -- load them directly
-	mov	r0, r1
-	mov	r1, #0				@ nothing to save
-	mov	r3, lr				@ preserve return address
-	bl	crunch_load
-	msr	cpsr_c, ip			@ restore interrupt mode
-	mov	pc, r3
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
deleted file mode 100644
index 25ef223ba7f3..000000000000
--- a/arch/arm/kernel/crunch.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * arch/arm/kernel/crunch.c
- * Cirrus MaverickCrunch context switching and handling
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <mach/ep93xx-regs.h>
-#include <asm/thread_notify.h>
-
-struct crunch_state *crunch_owner;
-
-void crunch_task_release(struct thread_info *thread)
-{
-	local_irq_disable();
-	if (crunch_owner == &thread->crunchstate)
-		crunch_owner = NULL;
-	local_irq_enable();
-}
-
-static int crunch_enabled(u32 devcfg)
-{
-	return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA);
-}
-
-static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
-	struct thread_info *thread = (struct thread_info *)t;
-	struct crunch_state *crunch_state;
-	u32 devcfg;
-
-	crunch_state = &thread->crunchstate;
-
-	switch (cmd) {
-	case THREAD_NOTIFY_FLUSH:
-		memset(crunch_state, 0, sizeof(*crunch_state));
-
-		/*
-		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
-		 * initialised state information on the first fault.
-		 */
-
-	case THREAD_NOTIFY_EXIT:
-		crunch_task_release(thread);
-		break;
-
-	case THREAD_NOTIFY_SWITCH:
-		devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
-		if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
-			/*
-			 * We don't use ep93xx_syscon_swlocked_write() here
-			 * because we are on the context switch path and
-			 * preemption is already disabled.
-			 */
-			devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
-			__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
-			__raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
-		}
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block crunch_notifier_block = {
-	.notifier_call	= crunch_do,
-};
-
-static int __init crunch_init(void)
-{
-	thread_register_notifier(&crunch_notifier_block);
-	elf_hwcap |= HWCAP_CRUNCH;
-
-	return 0;
-}
-
-late_initcall(crunch_init);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index be16a48007b4..22f0ed324f37 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -19,7 +19,9 @@
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>
+#ifndef CONFIG_MULTI_IRQ_HANDLER
 #include <mach/entry-macro.S>
+#endif
 #include <asm/thread_notify.h>
 #include <asm/unwind.h>
 #include <asm/unistd.h>
@@ -1101,7 +1103,6 @@ __stubs_start:
  * get out of that mode without clobbering one register.
  */
 vector_fiq:
-	disable_fiq
 	subs	pc, lr, #4
 
 /*=============================================================================
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 9fd0ba90c1d2..54ee265dd819 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -10,9 +10,15 @@
 
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
-#include <mach/entry-macro.S>
 #include <asm/unwind.h>
 
+#ifdef CONFIG_NEED_RET_TO_USER
+#include <mach/entry-macro.S>
+#else
+	.macro	arch_ret_to_user, tmp1, tmp2
+	.endm
+#endif
+
 #include "entry-header.S"
 
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47f..8a89d3b7626b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base	|= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -539,6 +539,10 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
 	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
 
@@ -680,6 +684,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 }
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
+/*
  * CPU PMU identification and registration.
  */
 static int __init
@@ -730,6 +756,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be9930ec2..b78af0cc6ef3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 
 		/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244c68f9..4d7095af2ab3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
 
 	counter = ARMV7_IDX_TO_COUNTER(idx);
 	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+	isb();
+	/* Clear the overflow flag in case an interrupt is pending. */
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+	isb();
+
 	return idx;
 }
 
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d8269829..71a21e6712f5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+		if (!event)
+			continue;
+
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	unsigned long flags, ien, evtsel;
+	unsigned long flags, ien, evtsel, of_flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
 		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		of_flags = XSCALE2_CCOUNT_OVERFLOW;
 		break;
 	case XSCALE_COUNTER0:
 		ien &= ~XSCALE2_COUNT0_INT_EN;
 		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		of_flags = XSCALE2_COUNT0_OVERFLOW;
 		break;
 	case XSCALE_COUNTER1:
 		ien &= ~XSCALE2_COUNT1_INT_EN;
 		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		of_flags = XSCALE2_COUNT1_OVERFLOW;
 		break;
 	case XSCALE_COUNTER2:
 		ien &= ~XSCALE2_COUNT2_INT_EN;
 		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		of_flags = XSCALE2_COUNT2_OVERFLOW;
 		break;
 	case XSCALE_COUNTER3:
 		ien &= ~XSCALE2_COUNT3_INT_EN;
 		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		of_flags = XSCALE2_COUNT3_OVERFLOW;
 		break;
 	default:
 		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
+	xscale2pmu_write_overflow_flags(of_flags);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 971d65c253a9..d3eca4524533 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -61,8 +61,6 @@ extern void setup_mm_for_reboot(void);
 
 static volatile int hlt_counter;
 
-#include <mach/system.h>
-
 void disable_hlt(void)
 {
 	hlt_counter++;
@@ -181,13 +179,17 @@ void cpu_idle_wait(void)
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
 /*
- * This is our default idle handler. We need to disable
- * interrupts here to ensure we don't miss a wakeup call.
+ * This is our default idle handler.
  */
+
+void (*arm_pm_idle)(void);
+
 static void default_idle(void)
 {
-	if (!need_resched())
-		arch_idle();
+	if (arm_pm_idle)
+		arm_pm_idle();
+	else
+		cpu_do_idle();
 	local_irq_enable();
 }
 
@@ -215,6 +217,10 @@ void cpu_idle(void)
 			cpu_die();
 #endif
 
+		/*
+		 * We need to disable interrupts here
+		 * to ensure we don't miss a wakeup call.
+		 */
 		local_irq_disable();
 #ifdef CONFIG_PL310_ERRATA_769419
 		wmb();
@@ -222,26 +228,23 @@ void cpu_idle(void)
 		if (hlt_counter) {
 			local_irq_enable();
 			cpu_relax();
-		} else {
+		} else if (!need_resched()) {
 			stop_critical_timings();
 			if (cpuidle_idle_call())
 				pm_idle();
 			start_critical_timings();
 			/*
-			 * This will eventually be removed - pm_idle
-			 * functions should always return with IRQs
-			 * enabled.
+			 * pm_idle functions must always
+			 * return with IRQs enabled.
 			 */
 			WARN_ON(irqs_disabled());
+		} else
 			local_irq_enable();
-		}
 		}
 		leds_event(led_idle_end);
 		rcu_idle_exit();
 		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
+		schedule_preempt_disabled();
 	}
 }
 
@@ -535,8 +538,7 @@ int vectors_user_mapping(void)
 	struct mm_struct *mm = current->mm;
 	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
 				       VM_READ | VM_EXEC |
-				       VM_MAYREAD | VM_MAYEXEC |
-				       VM_ALWAYSDUMP | VM_RESERVED,
+				       VM_MAYREAD | VM_MAYEXEC | VM_RESERVED,
 				       NULL);
 }
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e33870ff0ac0..ede6443c34d9 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -23,6 +23,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/regset.h>
+#include <linux/audit.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -904,6 +905,12 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ret;
 }
 
+#ifdef __ARMEB__
+#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
+#else
+#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
+#endif
+
 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 {
 	unsigned long ip;
@@ -918,7 +925,7 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 	if (!ip)
 		audit_syscall_exit(regs);
 	else
-		audit_syscall_entry(AUDIT_ARCH_ARMEB, scno, regs->ARM_r0,
+		audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
 				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index cdeb727527d3..8f8cce2c46c4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -246,6 +246,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 	store_cpu_topology(cpuid);
 }
 
+static void percpu_timer_setup(void);
+
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -295,13 +297,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	 */
 	percpu_timer_setup();
 
-	while (!cpu_active(cpu))
-		cpu_relax();
-
-	/*
-	 * cpu_active bit is set, so it's safe to enalbe interrupts
-	 * now.
-	 */
 	local_irq_enable();
 	local_fiq_enable();
 
@@ -459,7 +454,20 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
 	clockevents_register_device(evt);
 }
 
-void __cpuinit percpu_timer_setup(void)
+static struct local_timer_ops *lt_ops;
+
+#ifdef CONFIG_LOCAL_TIMERS
+int local_timer_register(struct local_timer_ops *ops)
+{
+	if (lt_ops)
+		return -EBUSY;
+
+	lt_ops = ops;
+	return 0;
+}
+#endif
+
+static void __cpuinit percpu_timer_setup(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
@@ -467,7 +475,7 @@ void __cpuinit percpu_timer_setup(void)
 	evt->cpumask = cpumask_of(cpu);
 	evt->broadcast = smp_timer_broadcast;
 
-	if (local_timer_setup(evt))
+	if (!lt_ops || lt_ops->setup(evt))
 		broadcast_timer_setup(evt);
 }
 
@@ -482,7 +490,8 @@ static void percpu_timer_stop(void)
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
-	local_timer_stop(evt);
+	if (lt_ops)
+		lt_ops->stop(evt);
 }
 #endif
 
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 4285daa077b0..fef42b21cecb 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -18,20 +18,23 @@
 #include <linux/smp.h>
 #include <linux/jiffies.h>
 #include <linux/clockchips.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
 
 #include <asm/smp_twd.h>
 #include <asm/localtimer.h>
 #include <asm/hardware/gic.h>
 
 /* set up by the platform code */
-void __iomem *twd_base;
+static void __iomem *twd_base;
 
 static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
 
 static struct clock_event_device __percpu **twd_evt;
+static int twd_ppi;
 
 static void twd_set_mode(enum clock_event_mode mode,
 			struct clock_event_device *clk)
@@ -77,7 +80,7 @@ static int twd_set_next_event(unsigned long evt,
  * If a local timer interrupt has occurred, acknowledge and return 1.
  * Otherwise, return 0.
  */
-int twd_timer_ack(void)
+static int twd_timer_ack(void)
 {
 	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
 		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
@@ -87,7 +90,7 @@ int twd_timer_ack(void)
 	return 0;
 }
 
-void twd_timer_stop(struct clock_event_device *clk)
+static void twd_timer_stop(struct clock_event_device *clk)
 {
 	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
 	disable_percpu_irq(clk->irq);
@@ -129,7 +132,7 @@ static struct notifier_block twd_cpufreq_nb = {
 
 static int twd_cpufreq_init(void)
 {
-	if (!IS_ERR(twd_clk))
+	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
 		return cpufreq_register_notifier(&twd_cpufreq_nb,
 			CPUFREQ_TRANSITION_NOTIFIER);
 
@@ -222,28 +225,10 @@ static struct clk *twd_get_clock(void)
 /*
  * Setup the local clock events for a CPU.
  */
-void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 {
 	struct clock_event_device **this_cpu_clk;
 
-	if (!twd_evt) {
-		int err;
-
-		twd_evt = alloc_percpu(struct clock_event_device *);
-		if (!twd_evt) {
-			pr_err("twd: can't allocate memory\n");
-			return;
-		}
-
-		err = request_percpu_irq(clk->irq, twd_handler,
-					 "twd", twd_evt);
-		if (err) {
-			pr_err("twd: can't register interrupt %d (%d)\n",
-			       clk->irq, err);
-			return;
-		}
-	}
-
 	if (!twd_clk)
 		twd_clk = twd_get_clock();
 
@@ -260,6 +245,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clk->rating = 350;
 	clk->set_mode = twd_set_mode;
 	clk->set_next_event = twd_set_next_event;
+	clk->irq = twd_ppi;
 
 	this_cpu_clk = __this_cpu_ptr(twd_evt);
 	*this_cpu_clk = clk;
@@ -267,4 +253,95 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clockevents_config_and_register(clk, twd_timer_rate,
 					0xf, 0xffffffff);
 	enable_percpu_irq(clk->irq, 0);
+
+	return 0;
+}
+
+static struct local_timer_ops twd_lt_ops __cpuinitdata = {
+	.setup	= twd_timer_setup,
+	.stop	= twd_timer_stop,
+};
+
+static int __init twd_local_timer_common_register(void)
+{
+	int err;
+
+	twd_evt = alloc_percpu(struct clock_event_device *);
+	if (!twd_evt) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
+	if (err) {
+		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
+		goto out_free;
+	}
+
+	err = local_timer_register(&twd_lt_ops);
+	if (err)
+		goto out_irq;
+
+	return 0;
+
+out_irq:
+	free_percpu_irq(twd_ppi, twd_evt);
+out_free:
+	iounmap(twd_base);
+	twd_base = NULL;
+	free_percpu(twd_evt);
+
+	return err;
 }
+
+int __init twd_local_timer_register(struct twd_local_timer *tlt)
+{
+	if (twd_base || twd_evt)
+		return -EBUSY;
+
+	twd_ppi	= tlt->res[1].start;
+
+	twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
+	if (!twd_base)
+		return -ENOMEM;
+
+	return twd_local_timer_common_register();
+}
+
+#ifdef CONFIG_OF
+const static struct of_device_id twd_of_match[] __initconst = {
+	{ .compatible = "arm,cortex-a9-twd-timer",	},
+	{ .compatible = "arm,cortex-a5-twd-timer",	},
+	{ .compatible = "arm,arm11mp-twd-timer",	},
+	{ },
+};
+
+void __init twd_local_timer_of_register(void)
+{
+	struct device_node *np;
+	int err;
+
+	np = of_find_matching_node(NULL, twd_of_match);
+	if (!np) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	twd_ppi = irq_of_parse_and_map(np, 0);
+	if (!twd_ppi) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	twd_base = of_iomap(np, 0);
+	if (!twd_base) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = twd_local_timer_common_register();
+
+out:
+	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
+}
+#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 99a572702509..f84dfe67724f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -266,6 +266,7 @@ void die(const char *str, struct pt_regs *regs, int err)
 {
 	struct thread_info *thread = current_thread_info();
 	int ret;
+	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
 
 	oops_enter();
 
@@ -273,7 +274,9 @@ void die(const char *str, struct pt_regs *regs, int err)
 	console_verbose();
 	bust_spinlocks(1);
 	if (!user_mode(regs))
-		report_bug(regs->ARM_pc, regs);
+		bug_type = report_bug(regs->ARM_pc, regs);
+	if (bug_type != BUG_TRAP_TYPE_NONE)
+		str = "Oops - BUG";
 	ret = __die(str, err, thread, regs);
 
 	if (regs && kexec_should_crash(thread->task))
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 1e19691e0406..43a31fb06318 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -10,6 +10,7 @@
 #include <asm/page.h>
 
 #define PROC_INFO							\
+	. = ALIGN(4);							\
 	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
 	*(.proc.info.init)						\
 	VMLINUX_SYMBOL(__proc_info_end) = .;