author     Linus Torvalds <torvalds@linux-foundation.org>  2012-07-27 18:14:26 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-27 18:14:26 -0400
commit     cea8f46c36c3f82860b038aa23a46e16757666ba
tree       e09dc37d2b6880d86dac09afbc0c686139d86df0
parent     c1e7179a38919f02dd950801529176b72f5e5a8a
parent     91b006def384d8f07f9f324ab211fefe2b085c90
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM updates from Russell King:
 "First ARM push of this merge window, post me coming back from
  holiday. This is what has been in linux-next for the last few weeks.
  Not much to say which isn't described by the commit summaries."

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (32 commits)
  ARM: 7463/1: topology: Update cpu_power according to DT information
  ARM: 7462/1: topology: factorize the update of sibling masks
  ARM: 7461/1: topology: Add arch_scale_freq_power function
  ARM: 7456/1: ptrace: provide separate functions for tracing syscall {entry,exit}
  ARM: 7455/1: audit: move syscall auditing until after ptrace SIGTRAP handling
  ARM: 7454/1: entry: don't bother with syscall tracing on ret_from_fork path
  ARM: 7453/1: audit: only allow syscall auditing for pure EABI userspace
  ARM: 7452/1: delay: allow timer-based delay implementation to be selected
  ARM: 7451/1: arch timer: implement read_current_timer and get_cycles
  ARM: 7450/1: dcache: select DCACHE_WORD_ACCESS for little-endian ARMv6+ CPUs
  ARM: 7449/1: use generic strnlen_user and strncpy_from_user functions
  ARM: 7448/1: perf: remove arm_perf_pmu_ids global enumeration
  ARM: 7447/1: rwlocks: remove unused branch labels from trylock routines
  ARM: 7446/1: spinlock: use ticket algorithm for ARMv6+ locking implementation
  ARM: 7445/1: mm: update CONTEXTIDR register to contain PID of current process
  ARM: 7444/1: kernel: add arch-timer C3STOP feature
  ARM: 7460/1: remove asm/locks.h
  ARM: 7439/1: head.S: simplify initial page table mapping
  ARM: 7437/1: zImage: Allow DTB command line concatenation with ATAG_CMDLINE
  ARM: 7436/1: Do not map the vectors page as write-through on UP systems
  ...
Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/arch_timer.c        |  13
 arch/arm/kernel/armksyms.c          |   7
 arch/arm/kernel/entry-common.S      |  20
 arch/arm/kernel/head.S              |  59
 arch/arm/kernel/perf_event.c        |  15
 arch/arm/kernel/perf_event_v6.c     |   2
 arch/arm/kernel/perf_event_v7.c     |   5
 arch/arm/kernel/perf_event_xscale.c |   2
 arch/arm/kernel/ptrace.c            |  34
 arch/arm/kernel/smp.c               |   2
 arch/arm/kernel/topology.c          | 239
 arch/arm/kernel/traps.c             |  78
 12 files changed, 345 insertions(+), 131 deletions(-)
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index dd58035621f7..cf258807160d 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -32,6 +32,8 @@ static int arch_timer_ppi2;
 
 static struct clock_event_device __percpu **arch_timer_evt;
 
+extern void init_current_timer_delay(unsigned long freq);
+
 /*
  * Architected system timer support.
  */
@@ -137,7 +139,7 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
 	/* Be safe... */
 	arch_timer_disable();
 
-	clk->features = CLOCK_EVT_FEAT_ONESHOT;
+	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
 	clk->name = "arch_sys_timer";
 	clk->rating = 450;
 	clk->set_mode = arch_timer_set_mode;
@@ -223,6 +225,14 @@ static cycle_t arch_counter_read(struct clocksource *cs)
 	return arch_counter_get_cntpct();
 }
 
+int read_current_timer(unsigned long *timer_val)
+{
+	if (!arch_timer_rate)
+		return -ENXIO;
+	*timer_val = arch_counter_get_cntpct();
+	return 0;
+}
+
 static struct clocksource clocksource_counter = {
 	.name	= "arch_sys_counter",
 	.rating	= 400,
@@ -296,6 +306,7 @@ static int __init arch_timer_register(void)
 	if (err)
 		goto out_free_irq;
 
+	init_current_timer_delay(arch_timer_rate);
 	return 0;
 
 out_free_irq:
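read_current_timer() is what makes the timer-based delay selectable (ARM: 7451/1 and 7452/1): once the architected counter is readable, udelay() can poll it instead of spinning a calibrated loop. A minimal user-space model of that polling delay, assuming a free-running constant-rate counter (emulated here with clock_gettime() so the sketch actually runs; all names are illustrative, not the kernel's):

	/* build: cc -O2 delay_sketch.c -o delay_sketch */
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	#define COUNTER_HZ 1000000000ULL	/* emulated 1 GHz counter */

	static uint64_t get_counter(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * COUNTER_HZ + ts.tv_nsec;
	}

	static void timer_delay_cycles(uint64_t cycles)
	{
		uint64_t start = get_counter();

		/* unsigned subtraction stays correct across counter wrap */
		while (get_counter() - start < cycles)
			;
	}

	static void timer_udelay(unsigned long usecs)
	{
		timer_delay_cycles(usecs * (COUNTER_HZ / 1000000));
	}

	int main(void)
	{
		uint64_t t0 = get_counter();
		timer_udelay(1000);		/* ~1 ms */
		printf("elapsed: %llu counter ticks\n",
		       (unsigned long long)(get_counter() - t0));
		return 0;
	}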
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index b57c75e0b01f..60d3b738d420 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -49,8 +49,7 @@ extern void __aeabi_ulcmp(void);
 extern void fpundefinstr(void);
 
 	/* platform dependent support */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(arm_delay_ops);
 
 	/* networking */
 EXPORT_SYMBOL(csum_partial);
@@ -87,10 +86,6 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
-	/* user mem (segment) */
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
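The two per-function delay exports collapse into one because the delay primitives are now reached through a patchable ops structure, so a timer driver can retarget them all at once. The exact fields of arm_delay_ops are not visible in this diff; the sketch below assumes a plausible two-function layout purely for illustration:

	/* sketch: one exported dispatch table instead of per-function symbols */
	#include <stdio.h>

	struct arm_delay_ops {
		void (*delay)(unsigned long loops);	/* assumed field names */
		void (*udelay)(unsigned long usecs);
	};

	static void loop_delay(unsigned long loops)  { while (loops--) ; }
	static void loop_udelay(unsigned long usecs) { loop_delay(usecs * 100); }

	/* boot default: calibrated loop; a timer driver may overwrite later */
	struct arm_delay_ops arm_delay_ops = {
		.delay	= loop_delay,
		.udelay	= loop_udelay,
	};

	static void udelay(unsigned long usecs)
	{
		arm_delay_ops.udelay(usecs);	/* callers never see the switch */
	}

	int main(void)
	{
		udelay(10);
		puts("dispatched through arm_delay_ops");
		return 0;
	}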
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 4afed88d250a..49d9f9305247 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -95,13 +95,7 @@ ENDPROC(ret_to_user)
 ENTRY(ret_from_fork)
 	bl	schedule_tail
 	get_thread_info tsk
-	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	mov	why, #1
-	tst	r1, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
-	beq	ret_slow_syscall
-	mov	r1, sp
-	mov	r0, #1				@ trace exit [IP = 1]
-	bl	syscall_trace
 	b	ret_slow_syscall
 ENDPROC(ret_from_fork)
 
@@ -448,10 +442,9 @@ ENDPROC(vector_swi)
  * context switches, and waiting for our parent to respond.
  */
 __sys_trace:
-	mov	r2, scno
-	add	r1, sp, #S_OFF
-	mov	r0, #0				@ trace entry [IP = 0]
-	bl	syscall_trace
+	mov	r1, scno
+	add	r0, sp, #S_OFF
+	bl	syscall_trace_enter
 
 	adr	lr, BSYM(__sys_trace_return)	@ return address
 	mov	scno, r0			@ syscall number (possibly new)
@@ -463,10 +456,9 @@ __sys_trace:
 
 __sys_trace_return:
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
-	mov	r2, scno
-	mov	r1, sp
-	mov	r0, #1				@ trace exit [IP = 1]
-	bl	syscall_trace
+	mov	r1, scno
+	mov	r0, sp
+	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
 	.align	5
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 835898e7d704..3db960e20cb8 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -55,14 +55,6 @@
 	add	\rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
 	.endm
 
-#ifdef CONFIG_XIP_KERNEL
-#define KERNEL_START	XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
-#define KERNEL_END	_edata_loc
-#else
-#define KERNEL_START	KERNEL_RAM_VADDR
-#define KERNEL_END	_end
-#endif
-
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -218,51 +210,46 @@ __create_page_tables:
 	blo	1b
 
 	/*
-	 * Now setup the pagetables for our kernel direct
-	 * mapped region.
+	 * Map our RAM from the start to the end of the kernel .bss section.
 	 */
-	mov	r3, pc
-	mov	r3, r3, lsr #SECTION_SHIFT
-	orr	r3, r7, r3, lsl #SECTION_SHIFT
-	add	r0, r4, #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
-	str	r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
-	ldr	r6, =(KERNEL_END - 1)
-	add	r0, r0, #1 << PMD_ORDER
+	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+	ldr	r6, =(_end - 1)
+	orr	r3, r8, r7
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
-1:	cmp	r0, r6
+1:	str	r3, [r0], #1 << PMD_ORDER
 	add	r3, r3, #1 << SECTION_SHIFT
-	strls	r3, [r0], #1 << PMD_ORDER
+	cmp	r0, r6
 	bls	1b
 
 #ifdef CONFIG_XIP_KERNEL
 	/*
-	 * Map some ram to cover our .data and .bss areas.
+	 * Map the kernel image separately as it is not located in RAM.
 	 */
-	add	r3, r8, #TEXT_OFFSET
-	orr	r3, r3, r7
-	add	r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
-	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> (SECTION_SHIFT - PMD_ORDER)]!
-	ldr	r6, =(_end - 1)
-	add	r0, r0, #4
+#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
+	mov	r3, pc
+	mov	r3, r3, lsr #SECTION_SHIFT
+	orr	r3, r7, r3, lsl #SECTION_SHIFT
+	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
+	ldr	r6, =(_edata_loc - 1)
+	add	r0, r0, #1 << PMD_ORDER
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:	cmp	r0, r6
-	add	r3, r3, #1 << 20
-	strls	r3, [r0], #4
+	add	r3, r3, #1 << SECTION_SHIFT
+	strls	r3, [r0], #1 << PMD_ORDER
 	bls	1b
 #endif
 
 	/*
-	 * Then map boot params address in r2 or the first 1MB (2MB with LPAE)
-	 * of ram if boot params address is not specified.
+	 * Then map boot params address in r2 if specified.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
-	moveq	r0, r8
-	sub	r3, r0, r8
-	add	r3, r3, #PAGE_OFFSET
-	add	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
-	orr	r6, r7, r0
-	str	r6, [r3]
+	subne	r3, r0, r8
+	addne	r3, r3, #PAGE_OFFSET
+	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
+	orrne	r6, r7, r0
+	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
 #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
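The simplified loop works because the shift arithmetic folds the section index and the entry size into one operation. A standalone check of that arithmetic, using classic ARM short-descriptor values (1 MiB sections, 4-byte first-level entries; the base and virtual addresses below are arbitrary examples):

	#include <stdio.h>

	#define SECTION_SHIFT	20		/* 1 MiB per section entry */
	#define PMD_ORDER	2		/* log2(4-byte entry size) */
	#define PAGE_OFFSET	0xC0000000UL	/* typical 3G/1G split */

	int main(void)
	{
		unsigned long pgdir = 0x80004000UL;	/* r4: table base (example) */
		unsigned long vaddr = PAGE_OFFSET;

		/* add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER):
		 * VA >> 20 is the section index and << 2 scales it to a byte
		 * offset - a single right shift by 18 does both at once. */
		unsigned long slot = pgdir + (vaddr >> (SECTION_SHIFT - PMD_ORDER));

		printf("entry for va %#lx: index %lu, address %#lx\n",
		       vaddr, vaddr >> SECTION_SHIFT, slot);
		return 0;
	}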
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index a02eada3aa5d..ab243b87118d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -47,17 +47,14 @@ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
-enum arm_perf_pmu_ids
-armpmu_get_pmu_id(void)
+const char *perf_pmu_name(void)
 {
-	int id = -ENODEV;
-
-	if (cpu_pmu != NULL)
-		id = cpu_pmu->id;
+	if (!cpu_pmu)
+		return NULL;
 
-	return id;
+	return cpu_pmu->pmu.name;
 }
-EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+EXPORT_SYMBOL_GPL(perf_pmu_name);
 
 int perf_num_counters(void)
 {
@@ -760,7 +757,7 @@ init_hw_perf_events(void)
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
 		register_cpu_notifier(&pmu_cpu_notifier);
-		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
+		armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
 	}
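With the private enum gone, consumers identify the PMU by its name string through the perf_pmu_name() hook that this file now provides. A stand-in model of the lookup and its NULL contract (kernel structs stubbed for illustration):

	#include <stdio.h>

	struct pmu	{ const char *name; };
	struct arm_pmu	{ struct pmu pmu; };

	static struct arm_pmu *cpu_pmu;		/* NULL until a PMU probes */

	const char *perf_pmu_name(void)
	{
		if (!cpu_pmu)
			return NULL;
		return cpu_pmu->pmu.name;
	}

	int main(void)
	{
		static struct arm_pmu a9 = { .pmu = { .name = "ARMv7 Cortex-A9" } };
		const char *n = perf_pmu_name();

		printf("before probe: %s\n", n ? n : "(none)");
		cpu_pmu = &a9;
		printf("after probe:  %s\n", perf_pmu_name());
		return 0;
	}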
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index ab627a740fa3..c90fcb2b6967 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -650,7 +650,6 @@ static int armv6_map_event(struct perf_event *event)
 }
 
 static struct arm_pmu armv6pmu = {
-	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
 	.handle_irq		= armv6pmu_handle_irq,
 	.enable			= armv6pmu_enable_event,
@@ -685,7 +684,6 @@ static int armv6mpcore_map_event(struct perf_event *event)
 }
 
 static struct arm_pmu armv6mpcore_pmu = {
-	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
 	.handle_irq		= armv6pmu_handle_irq,
 	.enable			= armv6pmu_enable_event,
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index d3c536068162..f04070bd2183 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1258,7 +1258,6 @@ static u32 __init armv7_read_num_pmnc_events(void)
 
 static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name		= "ARMv7 Cortex-A8";
 	armv7pmu.map_event	= armv7_a8_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
@@ -1267,7 +1266,6 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void)
 
 static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name		= "ARMv7 Cortex-A9";
 	armv7pmu.map_event	= armv7_a9_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
@@ -1276,7 +1274,6 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void)
 
 static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name		= "ARMv7 Cortex-A5";
 	armv7pmu.map_event	= armv7_a5_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
@@ -1285,7 +1282,6 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void)
 
 static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name		= "ARMv7 Cortex-A15";
 	armv7pmu.map_event	= armv7_a15_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
@@ -1295,7 +1291,6 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
 
 static struct arm_pmu *__init armv7_a7_pmu_init(void)
 {
-	armv7pmu.id		= ARM_PERF_PMU_ID_CA7;
 	armv7pmu.name		= "ARMv7 Cortex-A7";
 	armv7pmu.map_event	= armv7_a7_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e34e7254e652..f759fe0bab63 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -435,7 +435,6 @@ static int xscale_map_event(struct perf_event *event)
 }
 
 static struct arm_pmu xscale1pmu = {
-	.id		= ARM_PERF_PMU_ID_XSCALE1,
 	.name		= "xscale1",
 	.handle_irq	= xscale1pmu_handle_irq,
 	.enable		= xscale1pmu_enable_event,
@@ -803,7 +802,6 @@ xscale2pmu_write_counter(int counter, u32 val)
 }
 
 static struct arm_pmu xscale2pmu = {
-	.id		= ARM_PERF_PMU_ID_XSCALE2,
 	.name		= "xscale2",
 	.handle_irq	= xscale2pmu_handle_irq,
 	.enable		= xscale2pmu_enable_event,
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 14e38261cd31..dab711e6e1ca 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -907,16 +907,16 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ret;
 }
 
-asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
+enum ptrace_syscall_dir {
+	PTRACE_SYSCALL_ENTER = 0,
+	PTRACE_SYSCALL_EXIT,
+};
+
+static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
+				enum ptrace_syscall_dir dir)
 {
 	unsigned long ip;
 
-	if (why)
-		audit_syscall_exit(regs);
-	else
-		audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
-				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
-
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return scno;
 
@@ -927,14 +927,28 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 	 * IP = 0 -> entry, =1 -> exit
 	 */
 	ip = regs->ARM_ip;
-	regs->ARM_ip = why;
+	regs->ARM_ip = dir;
 
-	if (why)
+	if (dir == PTRACE_SYSCALL_EXIT)
 		tracehook_report_syscall_exit(regs, 0);
 	else if (tracehook_report_syscall_entry(regs))
 		current_thread_info()->syscall = -1;
 
 	regs->ARM_ip = ip;
-
 	return current_thread_info()->syscall;
 }
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+{
+	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+	audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,
+			    regs->ARM_r2, regs->ARM_r3);
+	return ret;
+}
+
+asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)
+{
+	int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
+	audit_syscall_exit(regs);
+	return ret;
+}
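The split also changes ordering (ARM: 7455/1): the ptrace hook, which may stop the task and hand SIGTRAP to a debugger, now runs before the audit calls instead of after them inside one combined function. A toy model of the resulting entry-path call order (all bodies are stand-ins):

	#include <stdio.h>

	static int tracehook(int scno)
	{
		puts("ptrace/tracehook (SIGTRAP handling happens here)");
		return scno;			/* tracer may pick a new scno */
	}

	static void audit_entry(int scno)
	{
		printf("audit entry: scno=%d\n", scno);
	}

	int syscall_trace_enter(int scno)
	{
		int ret = tracehook(scno);	/* ptrace first ... */
		audit_entry(scno);		/* ... audit afterwards */
		return ret;
	}

	int main(void)
	{
		return syscall_trace_enter(42) == 42 ? 0 : 1;
	}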
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2c7217d971db..aea74f5bc34a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -179,7 +179,7 @@ void __ref cpu_die(void)
 	mb();
 
 	/* Tell __cpu_die() that this CPU is now safe to dispose of */
-	complete(&cpu_died);
+	RCU_NONIDLE(complete(&cpu_died));
 
 	/*
 	 * actual CPU shutdown procedure is at least platform (if not
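cpu_die() runs on a CPU that RCU already treats as idle (an extended quiescent state), and complete() may use RCU internally, so the call is bracketed with RCU_NONIDLE(). The wrapper's shape, paraphrased from the rcupdate.h of this era and stubbed so it runs in user space; treat the bracket functions as an approximation:

	#include <stdio.h>

	static void rcu_irq_enter(void) { puts("rcu: cpu temporarily non-idle"); }
	static void rcu_irq_exit(void)  { puts("rcu: cpu idle again"); }

	#define RCU_NONIDLE(a)			\
		do {				\
			rcu_irq_enter();	\
			do { a; } while (0);	\
			rcu_irq_exit();		\
		} while (0)

	static void complete_cpu_died(void)
	{
		puts("complete(&cpu_died)");	/* may legally use RCU here */
	}

	int main(void)
	{
		RCU_NONIDLE(complete_cpu_died());
		return 0;
	}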
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200deaa14f6..198b08456e90 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -17,11 +17,190 @@
 #include <linux/percpu.h>
 #include <linux/node.h>
 #include <linux/nodemask.h>
+#include <linux/of.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
+/*
+ * cpu power scale management
+ */
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account during load balance. A per cpu structure
+ * is preferred because each CPU updates its own cpu_power field during the
+ * load balance except for idle cores. One idle core is selected to run the
+ * rebalance_domains for all idle cores and the cpu_power can be updated
+ * during this sequence.
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+	per_cpu(cpu_scale, cpu) = power;
+}
+
+#ifdef CONFIG_OF
+struct cpu_efficiency {
+	const char *compatible;
+	unsigned long efficiency;
+};
+
+/*
+ * Table of relative efficiency of each processor.
+ * The efficiency value must fit in 20 bits and the final
+ * cpu_scale value must be in the range
+ *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+ * in order to return at most 1 when DIV_ROUND_CLOSEST
+ * is used to compute the capacity of a CPU.
+ * Processors that are not defined in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+struct cpu_efficiency table_efficiency[] = {
+	{"arm,cortex-a15", 3891},
+	{"arm,cortex-a7",  2048},
+	{NULL, },
+};
+
+struct cpu_capacity {
+	unsigned long hwid;
+	unsigned long capacity;
+};
+
+struct cpu_capacity *cpu_capacity;
+
+unsigned long middle_capacity = 1;
+
+/*
+ * Iterate all CPU descriptors in the DT and compute the efficiency
+ * (as per table_efficiency). Also calculate a middle efficiency
+ * as close as possible to (max{eff_i} - min{eff_i}) / 2.
+ * This is later used to scale the cpu_power field such that an
+ * 'average' CPU is of middle power. Also see the comments near
+ * table_efficiency[] and update_cpu_power().
+ */
+static void __init parse_dt_topology(void)
+{
+	struct cpu_efficiency *cpu_eff;
+	struct device_node *cn = NULL;
+	unsigned long min_capacity = (unsigned long)(-1);
+	unsigned long max_capacity = 0;
+	unsigned long capacity = 0;
+	int alloc_size, cpu = 0;
+
+	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
+	cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT);
+
+	while ((cn = of_find_node_by_type(cn, "cpu"))) {
+		const u32 *rate, *reg;
+		int len;
+
+		if (cpu >= num_possible_cpus())
+			break;
+
+		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
+			if (of_device_is_compatible(cn, cpu_eff->compatible))
+				break;
+
+		if (cpu_eff->compatible == NULL)
+			continue;
+
+		rate = of_get_property(cn, "clock-frequency", &len);
+		if (!rate || len != 4) {
+			pr_err("%s missing clock-frequency property\n",
+				cn->full_name);
+			continue;
+		}
+
+		reg = of_get_property(cn, "reg", &len);
+		if (!reg || len != 4) {
+			pr_err("%s missing reg property\n", cn->full_name);
+			continue;
+		}
+
+		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+
+		/* Save min capacity of the system */
+		if (capacity < min_capacity)
+			min_capacity = capacity;
+
+		/* Save max capacity of the system */
+		if (capacity > max_capacity)
+			max_capacity = capacity;
+
+		cpu_capacity[cpu].capacity = capacity;
+		cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
+	}
+
+	if (cpu < num_possible_cpus())
+		cpu_capacity[cpu].hwid = (unsigned long)(-1);
+
+	/* If min and max capacities are equal, we bypass the update of the
+	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
+	 * compute a middle_capacity factor that will ensure that the capacity
+	 * of an 'average' CPU of the system will be as close as possible to
+	 * SCHED_POWER_SCALE, which is the default value, but with the
+	 * constraint explained near table_efficiency[].
+	 */
+	if (min_capacity == max_capacity)
+		cpu_capacity[0].hwid = (unsigned long)(-1);
+	else if (4*max_capacity < (3*(max_capacity + min_capacity)))
+		middle_capacity = (min_capacity + max_capacity)
+				>> (SCHED_POWER_SHIFT+1);
+	else
+		middle_capacity = ((max_capacity / 3)
+				>> (SCHED_POWER_SHIFT-1)) + 1;
+
+}
+
+/*
+ * Look for a custom capacity of a CPU in the cpu_capacity table during
+ * boot. The update of all CPUs is in O(n^2) for a heterogeneous system
+ * but the function returns directly for an SMP system.
+ */
+void update_cpu_power(unsigned int cpu, unsigned long hwid)
+{
+	unsigned int idx = 0;
+
+	/* look for the cpu's hwid in the cpu capacity table */
+	for (idx = 0; idx < num_possible_cpus(); idx++) {
+		if (cpu_capacity[idx].hwid == hwid)
+			break;
+
+		if (cpu_capacity[idx].hwid == -1)
+			return;
+	}
+
+	if (idx == num_possible_cpus())
+		return;
+
+	set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
+
+	printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
+		cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+#else
+static inline void parse_dt_topology(void) {}
+static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
+#endif
+
+
+/*
+ * cpu topology management
+ */
+
 #define MPIDR_SMP_BITMASK (0x3 << 30)
 #define MPIDR_SMP_VALUE (0x2 << 30)
 
@@ -31,6 +210,7 @@
  * These masks reflect the current use of the affinity levels.
  * The affinity level can be up to 16 bits according to ARM ARM
  */
+#define MPIDR_HWID_BITMASK 0xFFFFFF
 
 #define MPIDR_LEVEL0_MASK 0x3
 #define MPIDR_LEVEL0_SHIFT 0
@@ -41,6 +221,9 @@
 #define MPIDR_LEVEL2_MASK 0xFF
 #define MPIDR_LEVEL2_SHIFT 16
 
+/*
+ * cpu topology table
+ */
 struct cputopo_arm cpu_topology[NR_CPUS];
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
@@ -48,6 +231,32 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	return &cpu_topology[cpu].core_sibling;
 }
 
+void update_siblings_masks(unsigned int cpuid)
+{
+	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+	int cpu;
+
+	/* update core and thread sibling masks */
+	for_each_possible_cpu(cpu) {
+		cpu_topo = &cpu_topology[cpu];
+
+		if (cpuid_topo->socket_id != cpu_topo->socket_id)
+			continue;
+
+		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+		if (cpu != cpuid)
+			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+		if (cpuid_topo->core_id != cpu_topo->core_id)
+			continue;
+
+		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+		if (cpu != cpuid)
+			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+	}
+	smp_wmb();
+}
+
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -57,7 +266,6 @@ void store_cpu_topology(unsigned int cpuid)
 {
 	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;
-	unsigned int cpu;
 
 	/* If the cpu topology has been already set, just return */
 	if (cpuid_topo->core_id != -1)
@@ -99,26 +307,9 @@ void store_cpu_topology(unsigned int cpuid)
 		cpuid_topo->socket_id = -1;
 	}
 
-	/* update core and thread sibling masks */
-	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
-
-		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
-			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
-			if (cpu != cpuid)
-				cpumask_set_cpu(cpu,
-					&cpuid_topo->core_sibling);
-
-			if (cpuid_topo->core_id == cpu_topo->core_id) {
-				cpumask_set_cpu(cpuid,
-					&cpu_topo->thread_sibling);
-				if (cpu != cpuid)
-					cpumask_set_cpu(cpu,
-						&cpuid_topo->thread_sibling);
-			}
-		}
-	}
-	smp_wmb();
+	update_siblings_masks(cpuid);
+
+	update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
 
 	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
@@ -134,7 +325,7 @@ void init_cpu_topology(void)
 {
 	unsigned int cpu;
 
-	/* init core mask */
+	/* init core mask and power */
 	for_each_possible_cpu(cpu) {
 		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
 
@@ -143,6 +334,10 @@ void init_cpu_topology(void)
 		cpu_topo->socket_id = -1;
 		cpumask_clear(&cpu_topo->core_sibling);
 		cpumask_clear(&cpu_topo->thread_sibling);
+
+		set_power_scale(cpu, SCHED_POWER_SCALE);
 	}
 	smp_wmb();
+
+	parse_dt_topology();
 }
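The capacity arithmetic in parse_dt_topology() is easy to check by hand. Taking the two table entries at an assumed 1 GHz clock, capacity = (rate >> 20) * efficiency gives 953 * 3891 = 3708123 for a Cortex-A15 and 953 * 2048 = 1951744 for a Cortex-A7; the middle_capacity branch then scales those into cpu_scale values straddling SCHED_POWER_SCALE (1024). A standalone re-run of that math:

	#include <stdio.h>

	#define SCHED_POWER_SHIFT	10
	#define SCHED_POWER_SCALE	(1UL << SCHED_POWER_SHIFT)

	int main(void)
	{
		unsigned long hz  = 1000000000;		/* both clusters at 1 GHz */
		unsigned long a15 = (hz >> 20) * 3891;	/* 953 * 3891 = 3708123 */
		unsigned long a7  = (hz >> 20) * 2048;	/* 953 * 2048 = 1951744 */
		unsigned long min = a7, max = a15, middle;

		/* same branch structure as parse_dt_topology() */
		if (4 * max < 3 * (max + min))
			middle = (min + max) >> (SCHED_POWER_SHIFT + 1);
		else
			middle = ((max / 3) >> (SCHED_POWER_SHIFT - 1)) + 1;

		/* prints: cpu_scale: A15=1342 A7=706 (default 1024) */
		printf("cpu_scale: A15=%lu A7=%lu (default %lu)\n",
		       a15 / middle, a7 / middle, SCHED_POWER_SCALE);
		return 0;
	}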
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3647170e9a16..8b97d739b17b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -233,9 +233,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 #define S_ISA " ARM"
 #endif
 
-static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
 {
-	struct task_struct *tsk = thread->task;
+	struct task_struct *tsk = current;
 	static int die_counter;
 	int ret;
 
@@ -245,12 +245,12 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
 	/* trap and error numbers are mostly meaningless on ARM */
 	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
 	if (ret == NOTIFY_STOP)
-		return ret;
+		return 1;
 
 	print_modules();
 	__show_regs(regs);
 	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
-		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
@@ -259,45 +259,77 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
 		dump_instr(KERN_EMERG, regs);
 	}
 
-	return ret;
+	return 0;
 }
 
-static DEFINE_RAW_SPINLOCK(die_lock);
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
 
-/*
- * This function is protected against re-entrancy.
- */
-void die(const char *str, struct pt_regs *regs, int err)
+static unsigned long oops_begin(void)
 {
-	struct thread_info *thread = current_thread_info();
-	int ret;
-	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
+	int cpu;
+	unsigned long flags;
 
 	oops_enter();
 
-	raw_spin_lock_irq(&die_lock);
+	/* racy, but better than risking deadlock. */
+	raw_local_irq_save(flags);
+	cpu = smp_processor_id();
+	if (!arch_spin_trylock(&die_lock)) {
+		if (cpu == die_owner)
+			/* nested oops. should stop eventually */;
+		else
+			arch_spin_lock(&die_lock);
+	}
+	die_nest_count++;
+	die_owner = cpu;
 	console_verbose();
 	bust_spinlocks(1);
-	if (!user_mode(regs))
-		bug_type = report_bug(regs->ARM_pc, regs);
-	if (bug_type != BUG_TRAP_TYPE_NONE)
-		str = "Oops - BUG";
-	ret = __die(str, err, thread, regs);
+	return flags;
+}
 
-	if (regs && kexec_should_crash(thread->task))
+static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+{
+	if (regs && kexec_should_crash(current))
 		crash_kexec(regs);
 
 	bust_spinlocks(0);
+	die_owner = -1;
 	add_taint(TAINT_DIE);
-	raw_spin_unlock_irq(&die_lock);
+	die_nest_count--;
+	if (!die_nest_count)
+		/* Nest count reaches zero, release the lock. */
+		arch_spin_unlock(&die_lock);
+	raw_local_irq_restore(flags);
 	oops_exit();
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
-	if (ret != NOTIFY_STOP)
-		do_exit(SIGSEGV);
+	if (signr)
+		do_exit(signr);
+}
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
+	unsigned long flags = oops_begin();
+	int sig = SIGSEGV;
+
+	if (!user_mode(regs))
+		bug_type = report_bug(regs->ARM_pc, regs);
+	if (bug_type != BUG_TRAP_TYPE_NONE)
+		str = "Oops - BUG";
+
+	if (__die(str, err, regs))
+		sig = 0;
+
+	oops_end(flags, regs, sig);
 }
 
 void arm_notify_die(const char *str, struct pt_regs *regs,