author     Tejun Heo <tj@kernel.org>   2009-07-03 18:13:18 -0400
committer  Tejun Heo <tj@kernel.org>   2009-07-03 18:13:18 -0400
commit     c43768cbb7655ea5ff782ae250f6e2ef4297cf98
tree       3982e41dde3eecaa3739a5d1a8ed18d04bd74f01 /arch/arm/kernel
parent     1a8dd307cc0a2119be4e578c517795464e6dabba
parent     746a99a5af60ee676afa2ba469ccd1373493c7e7
Merge branch 'master' into for-next
Pull linus#master to merge PER_CPU_DEF_ATTRIBUTES and the alpha build fix
changes. As alpha in the percpu tree uses the 'weak' attribute instead of
inline assembly, there is no need for the __used attribute.

Conflicts:
        arch/alpha/include/asm/percpu.h
        arch/mn10300/kernel/vmlinux.lds.S
        include/linux/percpu-defs.h
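For context, the 'weak' vs. __used remark is about symbol visibility to the
compiler. A minimal sketch of the distinction (illustrative only, not code
from this merge):

        /* Illustrative only -- not code from this merge. */

        /* A symbol referenced only from inline assembly is invisible to the
         * compiler's liveness analysis, so it needs __used (or the raw
         * attribute) to survive dead-code elimination: */
        static int from_asm_only __attribute__((__used__));

        /* A 'weak' definition is an ordinary linker-visible symbol; the
         * compiler cannot silently discard it, so no __used is needed: */
        int __attribute__((weak)) alpha_percpu_stub;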
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/calls.S        |  2
-rw-r--r--   arch/arm/kernel/irq.c          | 28
-rw-r--r--   arch/arm/kernel/process.c      | 77
-rw-r--r--   arch/arm/kernel/unwind.c       | 19
-rw-r--r--   arch/arm/kernel/vmlinux.lds.S  | 23
5 files changed, 85 insertions(+), 64 deletions(-)
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 1680e9e9c831..f776e72a4cb8 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -372,6 +372,8 @@
 /* 360 */      CALL(sys_inotify_init1)
                CALL(sys_preadv)
                CALL(sys_pwritev)
+               CALL(sys_rt_tgsigqueueinfo)
+               CALL(sys_perf_counter_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
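The padding expression keeps the call table a multiple of four entries. If,
as the numbering above suggests, sys_perf_counter_open lands in slot 364,
the zero-based table holds 365 entries (this NR_syscalls value is inferred,
not stated in the diff). A quick check of the arithmetic:

        /* Quick check of the padding arithmetic (NR_syscalls inferred). */
        #include <stdio.h>

        int main(void)
        {
                unsigned nr_syscalls = 365;                /* last entry 364 */
                unsigned padded = (nr_syscalls + 3) & ~3u; /* round up to 4 */
                printf("%u pad entries\n", padded - nr_syscalls); /* 3 */
                return 0;
        }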
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6874c7dca75a..b7c3490eaa24 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -98,17 +98,6 @@ unlock:
 	return 0;
 }
 
-/* Handle bad interrupts */
-static struct irq_desc bad_irq_desc = {
-	.handle_irq = handle_bad_irq,
-	.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
-};
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* We are not allocating bad_irq_desc.affinity or .pending_mask */
-#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
-#endif
-
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -124,10 +113,13 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	 * Some hardware gives randomly wrong interrupts.  Rather
 	 * than crashing, do something sensible.
 	 */
-	if (irq >= NR_IRQS)
-		handle_bad_irq(irq, &bad_irq_desc);
-	else
+	if (unlikely(irq >= NR_IRQS)) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "Bad IRQ%u\n", irq);
+		ack_bad_irq(irq);
+	} else {
 		generic_handle_irq(irq);
+	}
 
 	/* AT91 specific workaround */
 	irq_finish(irq);
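Out-of-range IRQ numbers are now logged (rate-limited) and acknowledged
directly, instead of being dispatched through the static bad_irq_desc that
this patch removes. On ARM of this era, ack_bad_irq() just bumps the error
counter reported via /proc/interrupts; roughly (paraphrased from
arch/arm/include/asm/hw_irq.h, worth verifying against the tree):

        static inline void ack_bad_irq(int irq)
        {
                extern unsigned long irq_err_count;
                irq_err_count++;        /* shown as "Err:" in /proc/interrupts */
        }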
@@ -165,10 +157,6 @@ void __init init_IRQ(void)
 	for (irq = 0; irq < NR_IRQS; irq++)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
-#ifdef CONFIG_SMP
-	cpumask_setall(bad_irq_desc.affinity);
-	bad_irq_desc.cpu = smp_processor_id();
-#endif
 	init_arch_irq();
 }
 
@@ -176,7 +164,7 @@ void __init init_IRQ(void)
 
 static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
 
 	spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +183,7 @@ void migrate_irqs(void)
 	for (i = 0; i < NR_IRQS; i++) {
 		struct irq_desc *desc = irq_desc + i;
 
-		if (desc->cpu == cpu) {
+		if (desc->node == cpu) {
 			unsigned int newcpu = cpumask_any_and(desc->affinity,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1585423699ee..39196dff478c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@ void arm_machine_restart(char mode, const char *cmd)
 /*
  * Function pointers to optional machine specific functions
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -130,20 +127,19 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
  */
 static void default_idle(void)
 {
-	if (hlt_counter)
-		cpu_relax();
-	else {
-		local_irq_disable();
-		if (!need_resched())
-			arch_idle();
-		local_irq_enable();
-	}
+	if (!need_resched())
+		arch_idle();
+	local_irq_enable();
 }
 
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
 /*
- * The idle thread.  We try to conserve power, while trying to keep
- * overall latency low.  The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread, has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way.  The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -151,21 +147,31 @@ void cpu_idle(void)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-
+		tick_nohz_stop_sched_tick(1);
+		leds_event(led_idle_start);
+		while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(smp_processor_id())) {
-			leds_event(led_idle_start);
-			cpu_die();
-		}
+			if (cpu_is_offline(smp_processor_id()))
+				cpu_die();
 #endif
 
-		if (!idle)
-			idle = default_idle;
-		leds_event(led_idle_start);
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
+			local_irq_disable();
+			if (hlt_counter) {
+				local_irq_enable();
+				cpu_relax();
+			} else {
+				stop_critical_timings();
+				pm_idle();
+				start_critical_timings();
+				/*
+				 * This will eventually be removed - pm_idle
+				 * functions should always return with IRQs
+				 * enabled.
+				 */
+				WARN_ON(irqs_disabled());
+				local_irq_enable();
+			}
+		}
 		leds_event(led_idle_end);
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
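With pm_idle now defaulting to default_idle and called unconditionally from
cpu_idle(), a platform can install its own low-power routine by assigning
the pointer. A minimal sketch, all myboard_* names hypothetical; it mirrors
default_idle in returning with IRQs enabled, as the WARN_ON above expects:

        /* Hypothetical platform override: pm_idle is entered with IRQs
         * disabled and, for now, must return with IRQs enabled. */
        static void myboard_idle(void)
        {
                arch_idle();            /* architecture low-power wait */
                local_irq_enable();
        }

        static int __init myboard_idle_init(void)
        {
                pm_idle = myboard_idle; /* replaces default_idle */
                return 0;
        }
        arch_initcall(myboard_idle_init);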
@@ -352,6 +358,23 @@ asm( ".section .text\n"
352" .size kernel_thread_helper, . - kernel_thread_helper\n" 358" .size kernel_thread_helper, . - kernel_thread_helper\n"
353" .previous"); 359" .previous");
354 360
361#ifdef CONFIG_ARM_UNWIND
362extern void kernel_thread_exit(long code);
363asm( ".section .text\n"
364" .align\n"
365" .type kernel_thread_exit, #function\n"
366"kernel_thread_exit:\n"
367" .fnstart\n"
368" .cantunwind\n"
369" bl do_exit\n"
370" nop\n"
371" .fnend\n"
372" .size kernel_thread_exit, . - kernel_thread_exit\n"
373" .previous");
374#else
375#define kernel_thread_exit do_exit
376#endif
377
355/* 378/*
356 * Create a kernel thread. 379 * Create a kernel thread.
357 */ 380 */
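kernel_thread_exit wraps the non-returning do_exit in a thunk annotated
.cantunwind, so a backtrace taken from an exiting kernel thread stops
cleanly at this frame rather than walking off the top of the stack; without
CONFIG_ARM_UNWIND it degrades to a plain #define. The .cantunwind directive
emits the EXIDX_CANTUNWIND marker, which the unwinder treats as a deliberate
stop; roughly how unwind.c handles it (paraphrased, unchanged by this diff):

        /* Paraphrased from unwind_frame(): an EXIDX_CANTUNWIND entry (1)
         * ends the backtrace at this frame. */
        if (idx->insn == 1)
                /* can't unwind */
                return -URC_FAILURE;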
@@ -363,7 +386,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 
 	regs.ARM_r1 = (unsigned long)arg;
 	regs.ARM_r2 = (unsigned long)fn;
-	regs.ARM_r3 = (unsigned long)do_exit;
+	regs.ARM_r3 = (unsigned long)kernel_thread_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
 	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
 
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 1dedc2c7ff49..dd56e11f339a 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -212,7 +212,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 			ctrl->vrs[14] = *vsp++;
 		ctrl->vrs[SP] = (unsigned long)vsp;
 	} else if (insn == 0xb0) {
-		ctrl->vrs[PC] = ctrl->vrs[LR];
+		if (ctrl->vrs[PC] == 0)
+			ctrl->vrs[PC] = ctrl->vrs[LR];
 		/* no further processing */
 		ctrl->entries = 0;
 	} else if (insn == 0xb1) {
@@ -309,18 +310,20 @@ int unwind_frame(struct stackframe *frame)
 	}
 
 	while (ctrl.entries > 0) {
-		int urc;
-
-		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
-			return -URC_FAILURE;
-		urc = unwind_exec_insn(&ctrl);
+		int urc = unwind_exec_insn(&ctrl);
 		if (urc < 0)
 			return urc;
+		if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+			return -URC_FAILURE;
 	}
 
 	if (ctrl.vrs[PC] == 0)
 		ctrl.vrs[PC] = ctrl.vrs[LR];
 
+	/* check for infinite loop */
+	if (frame->pc == ctrl.vrs[PC])
+		return -URC_FAILURE;
+
 	frame->fp = ctrl.vrs[FP];
 	frame->sp = ctrl.vrs[SP];
 	frame->lr = ctrl.vrs[LR];
@@ -332,7 +335,6 @@ int unwind_frame(struct stackframe *frame)
 void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	unsigned long high, low;
 	register unsigned long current_sp asm ("sp");
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -362,9 +364,6 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		frame.pc = thread_saved_pc(tsk);
 	}
 
-	low = frame.sp & ~(THREAD_SIZE - 1);
-	high = low + THREAD_SIZE;
-
 	while (1) {
 		int urc;
 		unsigned long where = frame.pc;
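The new same-PC check means unwind_frame() returns -URC_FAILURE instead of
handing back the identical frame forever when an unwind entry leaves the PC
unchanged, so a caller's walk is guaranteed to terminate. A sketch of the
caller pattern, simplified from the unwind_backtrace() loop above:

        /* Simplified caller's view: the walk always terminates, on a decode
         * error, a stack-bounds violation, or the new same-PC check. */
        while (1) {
                unsigned long where = frame.pc;

                if (unwind_frame(&frame) < 0)   /* e.g. -URC_FAILURE */
                        break;
                dump_backtrace_entry(where, frame.pc, frame.sp - 4);
        }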
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index e256c57b8981..5cc4812c9763 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/page.h>
 
 OUTPUT_ARCH(arm)
 ENTRY(stext)
@@ -63,7 +64,7 @@ SECTIONS
 		usr/built-in.o(.init.ramfs)
 		__initramfs_end = .;
 #endif
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__per_cpu_load = .;
 		__per_cpu_start = .;
 			*(.data.percpu.page_aligned)
@@ -73,7 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 #endif
 	}
@@ -85,6 +86,14 @@ SECTIONS
 		*(.discard)
 		*(.ARM.exidx.exit.text)
 		*(.ARM.extab.exit.text)
+#ifndef CONFIG_HOTPLUG_CPU
+		*(.ARM.exidx.cpuexit.text)
+		*(.ARM.extab.cpuexit.text)
+#endif
+#ifndef CONFIG_HOTPLUG
+		*(.ARM.exidx.devexit.text)
+		*(.ARM.extab.devexit.text)
+#endif
 #ifndef CONFIG_MMU
 		*(.fixup)
 		*(__ex_table)
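These DISCARD entries mirror the text sections they describe: when
CONFIG_HOTPLUG_CPU (or CONFIG_HOTPLUG) is off, .cpuexit.text/.devexit.text
are thrown away, so their ARM unwind tables must go too or the link ends up
with exidx entries pointing at discarded code. A hypothetical example of
code that lands in those sections (myplat_* is an invented name):

        /* Hypothetical: a __cpuexit function is placed in .cpuexit.text and
         * its unwind entry in .ARM.exidx.cpuexit.text; with
         * CONFIG_HOTPLUG_CPU=n both sections are discarded together. */
        static void __cpuexit myplat_cpu_teardown(unsigned int cpu)
        {
                /* release per-cpu resources for the departing CPU */
        }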
@@ -111,7 +120,7 @@ SECTIONS
 		*(.got)			/* Global offset table */
 	}
 
-	RODATA
+	RO_DATA(PAGE_SIZE)
 
 	_etext = .;			/* End of text and rodata section */
 
@@ -151,17 +160,17 @@ SECTIONS
 		*(.data.init_task)
 
 #ifdef CONFIG_XIP_KERNEL
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__init_begin = .;
 		INIT_DATA
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__init_end = .;
 #endif
 
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__nosave_begin = .;
 		*(.data.nosave)
-		. = ALIGN(4096);
+		. = ALIGN(PAGE_SIZE);
 		__nosave_end = .;
 
 		/*
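The ALIGN(PAGE_SIZE) conversions are behavior-preserving on ARM, where pages
are 4 KiB; the new asm/page.h include at the top of the script supplies the
macro. Roughly what that header defines at this point (paraphrased; the real
header uses _AC() so the constant also works in assembly and linker-script
contexts):

        /* Paraphrased from arch/arm/include/asm/page.h of this era: */
        #define PAGE_SHIFT	12
        #define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4096, so
        						   ALIGN(PAGE_SIZE) ==
        						   ALIGN(4096) */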