Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile          5
-rw-r--r--  arch/arm/kernel/armksyms.c        5
-rw-r--r--  arch/arm/kernel/atags.c          83
-rw-r--r--  arch/arm/kernel/ecard.c          13
-rw-r--r--  arch/arm/kernel/ecard.h          13
-rw-r--r--  arch/arm/kernel/entry-common.S   51
-rw-r--r--  arch/arm/kernel/ftrace.c        116
-rw-r--r--  arch/arm/kernel/kprobes.c         2
-rw-r--r--  arch/arm/kernel/process.c         4
-rw-r--r--  arch/arm/kernel/smp.c           163
-rw-r--r--  arch/arm/kernel/stacktrace.c     35
-rw-r--r--  arch/arm/kernel/time.c          120
12 files changed, 287 insertions(+), 323 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ad455ff5aebe..eb9092ca8008 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -4,6 +4,10 @@
 
 AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
+ifdef CONFIG_DYNAMIC_FTRACE
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
 # Object file lists.
 
 obj-y		:= compat.o entry-armv.o entry-common.o irq.o \
@@ -18,6 +22,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-decode.o
 obj-$(CONFIG_ATAGS_PROC)	+= atags.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 688b7b1ee416..cc7b246e9652 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -18,6 +18,7 @@
 #include <asm/io.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/ftrace.h>
 
 /*
  * libgcc functions - functions that are used internally by the
@@ -181,3 +182,7 @@ EXPORT_SYMBOL(_find_next_bit_be);
 #endif
 
 EXPORT_SYMBOL(copy_page);
+
+#ifdef CONFIG_FTRACE
+EXPORT_SYMBOL(mcount);
+#endif
diff --git a/arch/arm/kernel/atags.c b/arch/arm/kernel/atags.c
index 64c420805e6f..42a1a1415fa6 100644
--- a/arch/arm/kernel/atags.c
+++ b/arch/arm/kernel/atags.c
@@ -1,5 +1,4 @@
 #include <linux/slab.h>
-#include <linux/kexec.h>
 #include <linux/proc_fs.h>
 #include <asm/setup.h>
 #include <asm/types.h>
@@ -7,9 +6,8 @@
 
 struct buffer {
 	size_t size;
-	char *data;
+	char data[];
 };
-static struct buffer tags_buffer;
 
 static int
 read_buffer(char* page, char** start, off_t off, int count,
@@ -29,58 +27,57 @@ read_buffer(char* page, char** start, off_t off, int count,
 	return count;
 }
 
-
-static int
-create_proc_entries(void)
-{
-	struct proc_dir_entry* tags_entry;
-
-	tags_entry = create_proc_read_entry("atags", 0400, NULL, read_buffer, &tags_buffer);
-	if (!tags_entry)
-		return -ENOMEM;
-
-	return 0;
-}
-
-
-static char __initdata atags_copy_buf[KEXEC_BOOT_PARAMS_SIZE];
-static char __initdata *atags_copy;
+#define BOOT_PARAMS_SIZE 1536
+static char __initdata atags_copy[BOOT_PARAMS_SIZE];
 
 void __init save_atags(const struct tag *tags)
 {
-	atags_copy = atags_copy_buf;
-	memcpy(atags_copy, tags, KEXEC_BOOT_PARAMS_SIZE);
+	memcpy(atags_copy, tags, sizeof(atags_copy));
 }
 
-
 static int __init init_atags_procfs(void)
 {
-	struct tag *tag;
-	int error;
+	/*
+	 * This cannot go into save_atags() because kmalloc and proc don't work
+	 * yet when it is called.
+	 */
+	struct proc_dir_entry *tags_entry;
+	struct tag *tag = (struct tag *)atags_copy;
+	struct buffer *b;
+	size_t size;
 
-	if (!atags_copy) {
-		printk(KERN_WARNING "Exporting ATAGs: No saved tags found\n");
-		return -EIO;
+	if (tag->hdr.tag != ATAG_CORE) {
+		printk(KERN_INFO "No ATAGs?");
+		return -EINVAL;
 	}
 
-	for (tag = (struct tag *) atags_copy; tag->hdr.size; tag = tag_next(tag))
+	for (; tag->hdr.size; tag = tag_next(tag))
 		;
 
-	tags_buffer.size = ((char *) tag - atags_copy) + sizeof(tag->hdr);
-	tags_buffer.data = kmalloc(tags_buffer.size, GFP_KERNEL);
-	if (tags_buffer.data == NULL)
-		return -ENOMEM;
-	memcpy(tags_buffer.data, atags_copy, tags_buffer.size);
-
-	error = create_proc_entries();
-	if (error) {
-		printk(KERN_ERR "Exporting ATAGs: not enough memory\n");
-		kfree(tags_buffer.data);
-		tags_buffer.size = 0;
-		tags_buffer.data = NULL;
-	}
+	/* include the terminating ATAG_NONE */
+	size = (char *)tag - atags_copy + sizeof(struct tag_header);
 
-	return error;
-}
+	WARN_ON(tag->hdr.tag != ATAG_NONE);
 
+	b = kmalloc(sizeof(*b) + size, GFP_KERNEL);
+	if (!b)
+		goto nomem;
+
+	b->size = size;
+	memcpy(b->data, atags_copy, size);
+
+	tags_entry = create_proc_read_entry("atags", 0400,
+			NULL, read_buffer, b);
+
+	if (!tags_entry)
+		goto nomem;
+
+	return 0;
+
+nomem:
+	kfree(b);
+	printk(KERN_ERR "Exporting ATAGs: not enough memory\n");
+
+	return -ENOMEM;
+}
 arch_initcall(init_atags_procfs);
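
The rewritten atags.c above keeps the bookkeeping header and the copied tag
data in one allocation: struct buffer now ends in a C99 flexible array member
(char data[]) and the kmalloc() is sized as sizeof(*b) + size, replacing the
old static tags_buffer plus a separately kmalloc'd data pointer. A minimal
user-space sketch of the same allocation pattern, with a hypothetical
buffer_dup() helper and malloc() standing in for kmalloc():

#include <stdlib.h>
#include <string.h>

struct buffer {
	size_t size;
	char data[];		/* flexible array member: payload follows the header */
};

/* Copy 'size' bytes of 'src' into a single header-plus-payload allocation. */
static struct buffer *buffer_dup(const void *src, size_t size)
{
	struct buffer *b = malloc(sizeof(*b) + size);

	if (!b)
		return NULL;
	b->size = size;
	memcpy(b->data, src, size);
	return b;
}
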
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index a53c0aba5c14..8bfd299bfe77 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -680,7 +680,7 @@ static int __init ecard_probeirqhw(void)
 #define IO_EC_MEMC8_BASE 0
 #endif
 
-unsigned int __ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
+static unsigned int __ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
 {
 	unsigned long address = 0;
 	int slot = ec->slot_no;
@@ -1002,7 +1002,7 @@ ecard_probe(int slot, card_type_t type)
 	}
 
 	rc = -ENODEV;
-	if ((ec->podaddr = ecard_address(ec, type, ECARD_SYNC)) == 0)
+	if ((ec->podaddr = __ecard_address(ec, type, ECARD_SYNC)) == 0)
 		goto nodev;
 
 	cid.r_zero = 1;
@@ -1141,10 +1141,10 @@ static int ecard_drv_probe(struct device *dev)
 
 	id = ecard_match_device(drv->id_table, ec);
 
-	ecard_claim(ec);
+	ec->claimed = 1;
 	ret = drv->probe(ec, id);
 	if (ret)
-		ecard_release(ec);
+		ec->claimed = 0;
 	return ret;
 }
 
@@ -1154,7 +1154,7 @@ static int ecard_drv_remove(struct device *dev)
 	struct ecard_driver *drv = ECARD_DRV(dev->driver);
 
 	drv->remove(ec);
-	ecard_release(ec);
+	ec->claimed = 0;
 
 	/*
 	 * Restore the default operations.  We ensure that the
@@ -1182,7 +1182,7 @@ static void ecard_drv_shutdown(struct device *dev)
 	if (dev->driver) {
 		if (drv->shutdown)
 			drv->shutdown(ec);
-		ecard_release(ec);
+		ec->claimed = 0;
 	}
 
 	/*
@@ -1239,7 +1239,6 @@ static int ecard_bus_init(void)
 postcore_initcall(ecard_bus_init);
 
 EXPORT_SYMBOL(ecard_readchunk);
-EXPORT_SYMBOL(__ecard_address);
 EXPORT_SYMBOL(ecard_register_driver);
 EXPORT_SYMBOL(ecard_remove_driver);
 EXPORT_SYMBOL(ecard_bus_type);
diff --git a/arch/arm/kernel/ecard.h b/arch/arm/kernel/ecard.h
index d7c2dacf935d..4642d436be2a 100644
--- a/arch/arm/kernel/ecard.h
+++ b/arch/arm/kernel/ecard.h
@@ -54,3 +54,16 @@ struct ex_chunk_dir {
 #define c_len(x)	((x)->r_len[0]|((x)->r_len[1]<<8)|((x)->r_len[2]<<16))
 #define c_start(x)	((x)->r_start)
 };
+
+typedef enum ecard_type {	/* Cards address space		*/
+	ECARD_IOC,
+	ECARD_MEMC,
+	ECARD_EASI
+} card_type_t;
+
+typedef enum {			/* Speed for ECARD_IOC space	*/
+	ECARD_SLOW	= 0,
+	ECARD_MEDIUM	= 1,
+	ECARD_FAST	= 2,
+	ECARD_SYNC	= 3
+} card_speed_t;
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 597ed00a08d8..84694e88b428 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -9,6 +9,7 @@
  */
 
 #include <asm/unistd.h>
+#include <asm/ftrace.h>
 #include <asm/arch/entry-macro.S>
 
 #include "entry-header.S"
@@ -99,6 +100,56 @@ ENTRY(ret_from_fork)
 #undef CALL
 #define CALL(x) .long x
 
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(mcount)
+	stmdb sp!, {r0-r3, lr}
+	mov r0, lr
+	sub r0, r0, #MCOUNT_INSN_SIZE
+
+	.globl mcount_call
+mcount_call:
+	bl ftrace_stub
+	ldmia sp!, {r0-r3, pc}
+
+ENTRY(ftrace_caller)
+	stmdb sp!, {r0-r3, lr}
+	ldr r1, [fp, #-4]
+	mov r0, lr
+	sub r0, r0, #MCOUNT_INSN_SIZE
+
+	.globl ftrace_call
+ftrace_call:
+	bl ftrace_stub
+	ldmia sp!, {r0-r3, pc}
+
+#else
+
+ENTRY(mcount)
+	stmdb sp!, {r0-r3, lr}
+	ldr r0, =ftrace_trace_function
+	ldr r2, [r0]
+	adr r0, ftrace_stub
+	cmp r0, r2
+	bne trace
+	ldmia sp!, {r0-r3, pc}
+
+trace:
+	ldr r1, [fp, #-4]
+	mov r0, lr
+	sub r0, r0, #MCOUNT_INSN_SIZE
+	mov lr, pc
+	mov pc, r2
+	ldmia sp!, {r0-r3, pc}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+	.globl ftrace_stub
+ftrace_stub:
+	mov pc, lr
+
+#endif /* CONFIG_FTRACE */
+
 /*=============================================================================
  * SWI handler
  *-----------------------------------------------------------------------------
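
In both mcount variants added above, the traced address is computed as
lr - MCOUNT_INSN_SIZE: the lr set up by the compiler-emitted "bl mcount"
points at the instruction after the call, so backing up one 4-byte ARM
instruction gives the address of the call site itself, while
"ldr r1, [fp, #-4]" fetches the caller's own return address from the APCS
frame. A rough C model of the !CONFIG_DYNAMIC_FTRACE path, assuming
MCOUNT_INSN_SIZE is 4 and the two-argument tracer signature of this era
(illustrative sketch only, not part of the patch):

#define MCOUNT_INSN_SIZE 4	/* one 32-bit ARM instruction */

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

/* Set by the ftrace core; compared against ftrace_stub before dispatching. */
static ftrace_func_t ftrace_trace_function = ftrace_stub;

static void mcount_model(unsigned long lr, unsigned long parent_lr)
{
	if (ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(lr - MCOUNT_INSN_SIZE, parent_lr);
}
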
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
new file mode 100644
index 000000000000..76d50e6091bc
--- /dev/null
+++ b/arch/arm/kernel/ftrace.c
@@ -0,0 +1,116 @@
+/*
+ * Dynamic function tracing support.
+ *
+ * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ *
+ * For licencing details, see COPYING.
+ *
+ * Defines low-level handling of mcount calls when the kernel
+ * is compiled with the -pg flag. When using dynamic ftrace, the
+ * mcount call-sites get patched lazily with NOP till they are
+ * enabled. All code mutation routines here take effect atomically.
+ */
+
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+
+#define PC_OFFSET      8
+#define BL_OPCODE      0xeb000000
+#define BL_OFFSET_MASK 0x00ffffff
+
+static unsigned long bl_insn;
+static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
+
+unsigned char *ftrace_nop_replace(void)
+{
+	return (char *)&NOP;
+}
+
+/* construct a branch (BL) instruction to addr */
+unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+	long offset;
+
+	offset = (long)addr - (long)(pc + PC_OFFSET);
+	if (unlikely(offset < -33554432 || offset > 33554428)) {
+		/* Can't generate branches that far (from ARM ARM). Ftrace
+		 * doesn't generate branches outside of kernel text.
+		 */
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
+	offset = (offset >> 2) & BL_OFFSET_MASK;
+	bl_insn = BL_OPCODE | offset;
+	return (unsigned char *)&bl_insn;
+}
+
+int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
+		       unsigned char *new_code)
+{
+	unsigned long err = 0, replaced = 0, old, new;
+
+	old = *(unsigned long *)old_code;
+	new = *(unsigned long *)new_code;
+
+	__asm__ __volatile__ (
+		"1:  ldr    %1, [%2]  \n"
+		"    cmp    %1, %4    \n"
+		"2:  streq  %3, [%2]  \n"
+		"    cmpne  %1, %3    \n"
+		"    movne  %0, #2    \n"
+		"3:\n"
+
+		".section .fixup, \"ax\"\n"
+		"4:  mov  %0, #1  \n"
+		"    b    3b      \n"
+		".previous\n"
+
+		".section __ex_table, \"a\"\n"
+		"    .long 1b, 4b \n"
+		"    .long 2b, 4b \n"
+		".previous\n"
+
+		: "=r"(err), "=r"(replaced)
+		: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
+		: "memory");
+
+	if (!err && (replaced == old))
+		flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+	return err;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	int ret;
+	unsigned long pc, old;
+	unsigned char *new;
+
+	pc = (unsigned long)&ftrace_call;
+	memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
+	new = ftrace_call_replace(pc, (unsigned long)func);
+	ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
+	return ret;
+}
+
+int ftrace_mcount_set(unsigned long *data)
+{
+	unsigned long pc, old;
+	unsigned long *addr = data;
+	unsigned char *new;
+
+	pc = (unsigned long)&mcount_call;
+	memcpy(&old, &mcount_call, MCOUNT_INSN_SIZE);
+	new = ftrace_call_replace(pc, *addr);
+	*addr = ftrace_modify_code(pc, (unsigned char *)&old, new);
+	return 0;
+}
+
+/* run from kstop_machine */
+int __init ftrace_dyn_arch_init(void *data)
+{
+	ftrace_mcount_set(data);
+	return 0;
+}
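
ftrace_call_replace() above builds an ARM BL instruction by hand: the branch
offset is taken relative to pc + 8 (the pipeline-visible PC), must fit in a
signed 26-bit byte range of roughly +/-32 MB (hence the -33554432/33554428
bounds check), and is stored as a word offset in the low 24 bits of an
0xEBxxxxxx opcode. A stand-alone sketch of that encoding plus the reverse
decode, useful for checking the arithmetic (hypothetical helper names, not
kernel API):

#include <stdint.h>

#define PC_OFFSET      8
#define BL_OPCODE      0xeb000000u
#define BL_OFFSET_MASK 0x00ffffffu

/* Encode "bl addr" to be placed at address pc (mirrors ftrace_call_replace). */
static uint32_t arm_bl_encode(uint32_t pc, uint32_t addr)
{
	int32_t offset = (int32_t)addr - (int32_t)(pc + PC_OFFSET);

	return BL_OPCODE | (((uint32_t)offset >> 2) & BL_OFFSET_MASK);
}

/* Recover the branch target from a BL word found at address pc. */
static uint32_t arm_bl_decode(uint32_t pc, uint32_t insn)
{
	/* shift the 24-bit field to the top, then arithmetic-shift back by 6
	 * to sign-extend and multiply the word offset by 4 in one step */
	int32_t offset = (int32_t)(insn << 8) >> 6;

	return pc + PC_OFFSET + (uint32_t)offset;
}
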
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 5593dd207216..5ee39e10c8d1 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -274,7 +274,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
  * for kretprobe handlers which should normally be interested in r0 only
  * anyway.
  */
-static void __attribute__((naked)) __kprobes kretprobe_trampoline(void)
+void __naked __kprobes kretprobe_trampoline(void)
 {
 	__asm__ __volatile__ (
 		"stmdb	sp!, {r0 - r11}	\n\t"
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 84f5a4c778fb..89bfded70a1f 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -133,10 +133,8 @@ static void default_idle(void)
 		cpu_relax();
 	else {
 		local_irq_disable();
-		if (!need_resched()) {
-			timer_dyn_reprogram();
+		if (!need_resched())
 			arch_idle();
-		}
 		local_irq_enable();
 	}
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index eefae1de334c..5a7c09564d13 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -68,20 +68,10 @@ enum ipi_msg_type {
 	IPI_TIMER,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
-struct smp_call_struct {
-	void (*func)(void *info);
-	void *info;
-	int wait;
-	cpumask_t pending;
-	cpumask_t unfinished;
-};
-
-static struct smp_call_struct * volatile smp_call_function_data;
-static DEFINE_SPINLOCK(smp_call_function_lock);
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -366,114 +356,15 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
 	local_irq_restore(flags);
 }
 
-/*
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler, nor from a bottom half handler.
- */
-static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
-				    int retry, int wait, cpumask_t callmap)
-{
-	struct smp_call_struct data;
-	unsigned long timeout;
-	int ret = 0;
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-
-	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		goto out;
-
-	data.pending = callmap;
-	if (wait)
-		data.unfinished = callmap;
-
-	/*
-	 * try to get the mutex on smp_call_function_data
-	 */
-	spin_lock(&smp_call_function_lock);
-	smp_call_function_data = &data;
-
-	send_ipi_message(callmap, IPI_CALL_FUNC);
-
-	timeout = jiffies + HZ;
-	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-		barrier();
-
-	/*
-	 * did we time out?
-	 */
-	if (!cpus_empty(data.pending)) {
-		/*
-		 * this may be causing our panic - report it
-		 */
-		printk(KERN_CRIT
-		       "CPU%u: smp_call_function timeout for %p(%p)\n"
-		       "      callmap %lx pending %lx, %swait\n",
-		       smp_processor_id(), func, info, *cpus_addr(callmap),
-		       *cpus_addr(data.pending), wait ? "" : "no ");
-
-		/*
-		 * TRACE
-		 */
-		timeout = jiffies + (5 * HZ);
-		while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-			barrier();
-
-		if (cpus_empty(data.pending))
-			printk(KERN_CRIT "     RESOLVED\n");
-		else
-			printk(KERN_CRIT "     STILL STUCK\n");
-	}
-
-	/*
-	 * whatever happened, we're done with the data, so release it
-	 */
-	smp_call_function_data = NULL;
-	spin_unlock(&smp_call_function_lock);
-
-	if (!cpus_empty(data.pending)) {
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	if (wait)
-		while (!cpus_empty(data.unfinished))
-			barrier();
- out:
-
-	return 0;
-}
-
-int smp_call_function(void (*func)(void *info), void *info, int retry,
-		      int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	return smp_call_function_on_cpu(func, info, retry, wait,
-					cpu_online_map);
+	send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
-			     int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	/* prevent preemption and reschedule on another processor */
-	int current_cpu = get_cpu();
-	int ret = 0;
-
-	if (cpu == current_cpu) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	} else
-		ret = smp_call_function_on_cpu(func, info, retry, wait,
-					       cpumask_of_cpu(cpu));
-
-	put_cpu();
-
-	return ret;
+	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void show_ipi_list(struct seq_file *p)
 {
@@ -521,27 +412,6 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
-/*
- * ipi_call_function - handle IPI from smp_call_function()
- *
- * Note that we copy data out of the cross-call structure and then
- * let the caller know that we're here and have done with their data
- */
-static void ipi_call_function(unsigned int cpu)
-{
-	struct smp_call_struct *data = smp_call_function_data;
-	void (*func)(void *info) = data->func;
-	void *info = data->info;
-	int wait = data->wait;
-
-	cpu_clear(cpu, data->pending);
-
-	func(info);
-
-	if (wait)
-		cpu_clear(cpu, data->unfinished);
-}
-
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -611,7 +481,11 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
 			break;
 
 		case IPI_CALL_FUNC:
-			ipi_call_function(cpu);
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			generic_smp_call_function_single_interrupt();
 			break;
 
 		case IPI_CPU_STOP:
@@ -662,14 +536,13 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 
 static int
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
-		 cpumask_t mask)
+on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
 {
 	int ret = 0;
 
 	preempt_disable();
 
-	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
+	ret = smp_call_function_mask(mask, func, info, wait);
 	if (cpu_isset(smp_processor_id(), mask))
 		func(info);
 
@@ -731,14 +604,14 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	cpumask_t mask = mm->cpu_vm_mask;
 
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -749,7 +622,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	ta.ta_vma = vma;
 	ta.ta_start = uaddr;
 
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -758,7 +631,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 
 	ta.ta_start = kaddr;
 
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -771,7 +644,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -781,5 +654,5 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 }
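
The smp.c conversion above deletes the ARM-private cross-call machinery
(struct smp_call_struct, the pending/unfinished masks and the timeout loop)
and keeps only the hooks the generic kernel/smp.c code needs:
arch_send_call_function_ipi() / arch_send_call_function_single_ipi() to raise
the IPIs, and the two generic_smp_call_function*_interrupt() calls in do_IPI()
to service them. Callers see the matching API change: the old "retry"
argument is gone, so a cross-CPU call now takes three arguments, as in this
hedged sketch (hypothetical function names, not from the patch):

/* Runs on every online CPU, in IPI context; keep it short and non-sleeping. */
static void ipi_sync_state(void *info)
{
	/* per-CPU work goes here */
}

static void sync_state_on_all_cpus(void *info)
{
	/* 2008-era signature: (func, info, wait) - note the dropped "retry" */
	on_each_cpu(ipi_sync_state, info, 1);
}
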
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index ae31deb2d065..fc650f64df43 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -36,6 +36,7 @@ EXPORT_SYMBOL(walk_stackframe);
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
 	struct stack_trace *trace;
+	unsigned int no_sched_functions;
 	unsigned int skip;
 };
 
@@ -43,27 +44,53 @@ static int save_trace(struct stackframe *frame, void *d)
 {
 	struct stack_trace_data *data = d;
 	struct stack_trace *trace = data->trace;
+	unsigned long addr = frame->lr;
 
+	if (data->no_sched_functions && in_sched_functions(addr))
+		return 0;
 	if (data->skip) {
 		data->skip--;
 		return 0;
 	}
 
-	trace->entries[trace->nr_entries++] = frame->lr;
+	trace->entries[trace->nr_entries++] = addr;
 
 	return trace->nr_entries >= trace->max_entries;
 }
 
-void save_stack_trace(struct stack_trace *trace)
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	struct stack_trace_data data;
 	unsigned long fp, base;
 
 	data.trace = trace;
 	data.skip = trace->skip;
-	base = (unsigned long)task_stack_page(current);
-	asm("mov %0, fp" : "=r" (fp));
+	base = (unsigned long)task_stack_page(tsk);
+
+	if (tsk != current) {
+#ifdef CONFIG_SMP
+		/*
+		 * What guarantees do we have here that 'tsk'
+		 * is not running on another CPU?
+		 */
+		BUG();
+#else
+		data.no_sched_functions = 1;
+		fp = thread_saved_fp(tsk);
+#endif
+	} else {
+		data.no_sched_functions = 0;
+		asm("mov %0, fp" : "=r" (fp));
+	}
 
 	walk_stackframe(fp, base, base + THREAD_SIZE, save_trace, &data);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(current, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 #endif
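
save_stack_trace_tsk() above follows the generic stacktrace calling convention
of the time: the caller provides a struct stack_trace whose entries[] array
and max_entries it owns, the walker fills nr_entries, and an ULONG_MAX
terminator is appended when there is room. A hedged usage sketch (field and
helper names as in linux/stacktrace.h of that era; dump_task_stack() is a
hypothetical caller):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

static void dump_task_stack(struct task_struct *tsk)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.nr_entries	= 0,
		.skip		= 0,
	};

	save_stack_trace_tsk(tsk, &trace);
	print_stack_trace(&trace, 0);	/* prints the collected entries */
}
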
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index b5867eca1d0b..cc5145b28e7f 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -365,108 +365,6 @@ static struct sysdev_class timer_sysclass = {
 	.resume		= timer_resume,
 };
 
-#ifdef CONFIG_NO_IDLE_HZ
-static int timer_dyn_tick_enable(void)
-{
-	struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
-	unsigned long flags;
-	int ret = -ENODEV;
-
-	if (dyn_tick) {
-		spin_lock_irqsave(&dyn_tick->lock, flags);
-		ret = 0;
-		if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
-			ret = dyn_tick->enable();
-
-			if (ret == 0)
-				dyn_tick->state |= DYN_TICK_ENABLED;
-		}
-		spin_unlock_irqrestore(&dyn_tick->lock, flags);
-	}
-
-	return ret;
-}
-
-static int timer_dyn_tick_disable(void)
-{
-	struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
-	unsigned long flags;
-	int ret = -ENODEV;
-
-	if (dyn_tick) {
-		spin_lock_irqsave(&dyn_tick->lock, flags);
-		ret = 0;
-		if (dyn_tick->state & DYN_TICK_ENABLED) {
-			ret = dyn_tick->disable();
-
-			if (ret == 0)
-				dyn_tick->state &= ~DYN_TICK_ENABLED;
-		}
-		spin_unlock_irqrestore(&dyn_tick->lock, flags);
-	}
-
-	return ret;
-}
-
-/*
- * Reprogram the system timer for at least the calculated time interval.
- * This function should be called from the idle thread with IRQs disabled,
- * immediately before sleeping.
- */
-void timer_dyn_reprogram(void)
-{
-	struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
-	unsigned long next, seq, flags;
-
-	if (!dyn_tick)
-		return;
-
-	spin_lock_irqsave(&dyn_tick->lock, flags);
-	if (dyn_tick->state & DYN_TICK_ENABLED) {
-		next = next_timer_interrupt();
-		do {
-			seq = read_seqbegin(&xtime_lock);
-			dyn_tick->reprogram(next - jiffies);
-		} while (read_seqretry(&xtime_lock, seq));
-	}
-	spin_unlock_irqrestore(&dyn_tick->lock, flags);
-}
-
-static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
-{
-	return sprintf(buf, "%i\n",
-		       (system_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
-}
-
-static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
-				  size_t count)
-{
-	unsigned int enable = simple_strtoul(buf, NULL, 2);
-
-	if (enable)
-		timer_dyn_tick_enable();
-	else
-		timer_dyn_tick_disable();
-
-	return count;
-}
-static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
-
-/*
- * dyntick=enable|disable
- */
-static char dyntick_str[4] __initdata = "";
-
-static int __init dyntick_setup(char *str)
-{
-	if (str)
-		strlcpy(dyntick_str, str, sizeof(dyntick_str));
-	return 1;
-}
-
-__setup("dyntick=", dyntick_setup);
-#endif
-
 static int __init timer_init_sysfs(void)
 {
 	int ret = sysdev_class_register(&timer_sysclass);
@@ -475,19 +373,6 @@ static int __init timer_init_sysfs(void)
 		ret = sysdev_register(&system_timer->dev);
 	}
 
-#ifdef CONFIG_NO_IDLE_HZ
-	if (ret == 0 && system_timer->dyn_tick) {
-		ret = sysdev_create_file(&system_timer->dev, &attr_dyn_tick);
-
-		/*
-		 * Turn on dynamic tick after calibrate delay
-		 * for correct bogomips
-		 */
-		if (ret == 0 && dyntick_str[0] == 'e')
-			ret = timer_dyn_tick_enable();
-	}
-#endif
-
 	return ret;
 }
 
@@ -500,10 +385,5 @@ void __init time_init(void)
 	system_timer->offset = dummy_gettimeoffset;
 #endif
 	system_timer->init();
-
-#ifdef CONFIG_NO_IDLE_HZ
-	if (system_timer->dyn_tick)
-		spin_lock_init(&system_timer->dyn_tick->lock);
-#endif
 }
 